From 4ff29decaecf69ac9db4235be1ea38d22796dd10 Mon Sep 17 00:00:00 2001
From: Sven Eckelmann
Date: Tue, 25 Jan 2011 21:59:26 +0000
Subject: [PATCH]

--- yaml ---
r: 236821
b: refs/heads/master
c: 5c77d8bb8aeb4ec6804b6c32061109ba2ea6988d
h: refs/heads/master
i:
  236819: 40b3c9b792a1c1e7dc7e714dd2b22d9d9d56c07a
v: v3
---
 [refs] | 2 +-
 .../ABI/testing/sysfs-platform-at91 | 25 -
 .../Documentation/DocBook/device-drivers.tmpl | 4 +-
 .../feature-removal-schedule.txt | 7 -
 trunk/Documentation/networking/bonding.txt | 83 +-
 trunk/MAINTAINERS | 23 +-
 trunk/arch/arm/mach-omap1/Kconfig | 2 -
 trunk/arch/arm/mach-omap1/Makefile | 3 +-
 trunk/arch/arm/mach-omap1/time.c | 101 +-
 trunk/arch/arm/mach-omap1/timer32k.c | 13 +-
 trunk/arch/arm/mach-omap2/board-cm-t3517.c | 29 +-
 trunk/arch/arm/mach-omap2/board-devkit8000.c | 3 +-
 trunk/arch/arm/mach-omap2/clock44xx_data.c | 1 +
 trunk/arch/arm/mach-omap2/clockdomain.c | 30 +-
 .../arm/mach-omap2/clockdomains44xx_data.c | 2 +
 .../arm/mach-omap2/powerdomain2xxx_3xxx.c | 1 +
 trunk/arch/arm/mach-omap2/timer-gp.c | 10 +-
 trunk/arch/arm/plat-omap/Kconfig | 8 +-
 trunk/arch/arm/plat-omap/counter_32k.c | 22 +-
 trunk/arch/arm/plat-omap/dma.c | 7 +-
 .../arch/arm/plat-omap/include/plat/common.h | 3 -
 .../arch/powerpc/kernel/perf_event_fsl_emb.c | 1 -
 trunk/arch/x86/include/asm/cacheflush.h | 42 +-
 trunk/arch/x86/include/asm/cpu.h | 1 -
 trunk/arch/x86/include/asm/jump_label.h | 2 +-
 trunk/arch/x86/kernel/cpu/intel_cacheinfo.c | 3 -
 .../arch/x86/kernel/cpu/mcheck/therm_throt.c | 1 -
 trunk/arch/x86/kernel/process.c | 3 +-
 trunk/arch/x86/kernel/smpboot.c | 3 +-
 trunk/drivers/atm/idt77105.c | 2 +-
 trunk/drivers/bluetooth/ath3k.c | 75 +-
 trunk/drivers/char/tpm/tpm.c | 10 +-
 trunk/drivers/char/tpm/tpm_tis.c | 6 +-
 trunk/drivers/clocksource/acpi_pm.c | 6 +-
 trunk/drivers/idle/intel_idle.c | 8 +-
 trunk/drivers/net/arm/ks8695net.c | 2 +-
 trunk/drivers/net/atl1c/atl1c_hw.c | 15 +-
 trunk/drivers/net/atl1c/atl1c_hw.h | 43 +-
 trunk/drivers/net/atl1e/atl1e_ethtool.c | 12 +-
 trunk/drivers/net/atl1e/atl1e_hw.c | 34 +-
 trunk/drivers/net/atl1e/atl1e_hw.h | 111 +-
 trunk/drivers/net/atl1e/atl1e_main.c | 4 +-
 trunk/drivers/net/bnx2.c | 29 +-
 trunk/drivers/net/bnx2.h | 3 +-
 trunk/drivers/net/bnx2x/bnx2x.h | 6 +-
 trunk/drivers/net/bnx2x/bnx2x_hsi.h | 89 +-
 trunk/drivers/net/bnx2x/bnx2x_link.c | 2592 ++++++-----
 trunk/drivers/net/bnx2x/bnx2x_link.h | 34 +-
 trunk/drivers/net/bnx2x/bnx2x_main.c | 21 +-
 trunk/drivers/net/bnx2x/bnx2x_reg.h | 1 -
 trunk/drivers/net/bonding/bond_3ad.c | 4 -
 trunk/drivers/net/bonding/bond_alb.c | 4 -
 trunk/drivers/net/bonding/bond_main.c | 12 +-
 trunk/drivers/net/bonding/bond_sysfs.c | 4 +-
 trunk/drivers/net/can/Kconfig | 4 +-
 trunk/drivers/net/can/Makefile | 1 -
 trunk/drivers/net/can/at91_can.c | 138 +-
 trunk/drivers/net/can/softing/Kconfig | 30 -
 trunk/drivers/net/can/softing/Makefile | 6 -
 trunk/drivers/net/can/softing/softing.h | 167 -
 trunk/drivers/net/can/softing/softing_cs.c | 359 --
 trunk/drivers/net/can/softing/softing_fw.c | 691 ---
 trunk/drivers/net/can/softing/softing_main.c | 893 ----
 .../net/can/softing/softing_platform.h | 40 -
 trunk/drivers/net/cnic.c | 39 +-
 trunk/drivers/net/cnic.h | 2 +-
 trunk/drivers/net/cxgb4/cxgb4_main.c | 3 +-
 trunk/drivers/net/dl2k.c | 4 +-
 trunk/drivers/net/e1000e/e1000.h | 5 +-
 trunk/drivers/net/e1000e/ethtool.c | 52 +-
 trunk/drivers/net/e1000e/ich8lan.c | 3 +-
 trunk/drivers/net/e1000e/lib.c | 4 +-
 trunk/drivers/net/e1000e/netdev.c | 117 +-
 trunk/drivers/net/e1000e/phy.c | 8 +-
 trunk/drivers/net/enic/enic.h | 6 +-
trunk/drivers/net/enic/enic_main.c | 10 +- trunk/drivers/net/hamradio/bpqether.c | 5 +- trunk/drivers/net/igb/e1000_82575.c | 1 - trunk/drivers/net/igb/e1000_hw.h | 1 - trunk/drivers/net/igb/igb_main.c | 1 - trunk/drivers/net/macvtap.c | 18 +- trunk/drivers/net/myri10ge/myri10ge.c | 4 +- trunk/drivers/net/pch_gbe/pch_gbe_main.c | 2 +- trunk/drivers/net/ppp_generic.c | 148 +- trunk/drivers/net/sfc/ethtool.c | 4 +- trunk/drivers/net/sfc/net_driver.h | 2 +- trunk/drivers/net/smc91x.c | 13 - trunk/drivers/net/sungem.c | 58 +- trunk/drivers/net/sungem.h | 1 + trunk/drivers/net/tg3.c | 258 +- trunk/drivers/net/tg3.h | 16 +- trunk/drivers/net/tlan.c | 3773 ++++++++--------- trunk/drivers/net/tlan.h | 192 +- trunk/drivers/net/tun.c | 2 +- trunk/drivers/net/typhoon.c | 3 +- trunk/drivers/net/usb/kaweth.c | 1 - trunk/drivers/net/veth.c | 12 + trunk/drivers/net/via-velocity.c | 9 - trunk/drivers/net/via-velocity.h | 8 +- trunk/drivers/net/vxge/vxge-config.c | 32 +- trunk/drivers/net/vxge/vxge-config.h | 10 - trunk/drivers/net/vxge/vxge-main.c | 216 +- trunk/drivers/net/vxge/vxge-main.h | 23 +- trunk/drivers/net/vxge/vxge-traffic.c | 116 +- trunk/drivers/net/vxge/vxge-traffic.h | 14 +- trunk/drivers/net/vxge/vxge-version.h | 4 +- trunk/drivers/net/wireless/ath/ar9170/main.c | 3 +- trunk/drivers/net/wireless/ath/ath.h | 2 - trunk/drivers/net/wireless/ath/ath5k/ahb.c | 7 +- trunk/drivers/net/wireless/ath/ath5k/base.c | 95 +- trunk/drivers/net/wireless/ath/ath5k/base.h | 3 + trunk/drivers/net/wireless/ath/ath5k/eeprom.c | 24 +- trunk/drivers/net/wireless/ath/ath5k/eeprom.h | 28 +- trunk/drivers/net/wireless/ath/ath5k/pci.c | 9 +- .../net/wireless/ath/ath9k/ar9002_calib.c | 5 +- .../net/wireless/ath/ath9k/ar9003_eeprom.c | 24 +- .../net/wireless/ath/ath9k/ar9003_mac.c | 8 +- .../net/wireless/ath/ath9k/ar9003_phy.h | 2 - trunk/drivers/net/wireless/ath/ath9k/ath9k.h | 47 +- trunk/drivers/net/wireless/ath/ath9k/beacon.c | 16 +- trunk/drivers/net/wireless/ath/ath9k/calib.c | 5 +- trunk/drivers/net/wireless/ath/ath9k/debug.c | 279 +- trunk/drivers/net/wireless/ath/ath9k/debug.h | 12 +- trunk/drivers/net/wireless/ath/ath9k/eeprom.c | 32 - trunk/drivers/net/wireless/ath/ath9k/eeprom.h | 2 - .../net/wireless/ath/ath9k/eeprom_4k.c | 41 +- .../net/wireless/ath/ath9k/eeprom_9287.c | 45 +- .../net/wireless/ath/ath9k/eeprom_def.c | 32 +- trunk/drivers/net/wireless/ath/ath9k/htc.h | 2 +- .../net/wireless/ath/ath9k/htc_drv_init.c | 29 - .../net/wireless/ath/ath9k/htc_drv_main.c | 9 +- trunk/drivers/net/wireless/ath/ath9k/hw.c | 27 +- trunk/drivers/net/wireless/ath/ath9k/hw.h | 4 - trunk/drivers/net/wireless/ath/ath9k/init.c | 10 +- trunk/drivers/net/wireless/ath/ath9k/mac.c | 14 +- trunk/drivers/net/wireless/ath/ath9k/main.c | 344 +- trunk/drivers/net/wireless/ath/ath9k/recv.c | 16 +- .../drivers/net/wireless/ath/ath9k/virtual.c | 48 + trunk/drivers/net/wireless/ath/ath9k/xmit.c | 166 +- .../net/wireless/ath/carl9170/carl9170.h | 1 - trunk/drivers/net/wireless/ath/carl9170/fw.c | 15 - .../drivers/net/wireless/ath/carl9170/fwcmd.h | 1 - .../net/wireless/ath/carl9170/fwdesc.h | 28 +- trunk/drivers/net/wireless/ath/carl9170/hw.h | 25 - .../drivers/net/wireless/ath/carl9170/main.c | 9 +- trunk/drivers/net/wireless/ath/carl9170/tx.c | 3 - .../net/wireless/ath/carl9170/version.h | 8 +- .../drivers/net/wireless/ath/carl9170/wlan.h | 20 +- trunk/drivers/net/wireless/iwlwifi/Kconfig | 26 +- trunk/drivers/net/wireless/iwlwifi/Makefile | 1 - trunk/drivers/net/wireless/iwlwifi/iwl-1000.c | 1 - 
trunk/drivers/net/wireless/iwlwifi/iwl-2000.c | 556 --- .../net/wireless/iwlwifi/iwl-3945-led.c | 27 + trunk/drivers/net/wireless/iwlwifi/iwl-3945.c | 5 +- trunk/drivers/net/wireless/iwlwifi/iwl-4965.c | 6 - trunk/drivers/net/wireless/iwlwifi/iwl-6000.c | 52 +- .../net/wireless/iwlwifi/iwl-agn-eeprom.c | 11 +- .../net/wireless/iwlwifi/iwl-agn-hcmd.c | 18 +- .../net/wireless/iwlwifi/iwl-agn-led.c | 14 +- .../net/wireless/iwlwifi/iwl-agn-led.h | 1 - .../net/wireless/iwlwifi/iwl-agn-lib.c | 51 +- .../net/wireless/iwlwifi/iwl-agn-rxon.c | 38 +- .../drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 9 +- trunk/drivers/net/wireless/iwlwifi/iwl-agn.c | 177 +- trunk/drivers/net/wireless/iwlwifi/iwl-agn.h | 30 +- .../net/wireless/iwlwifi/iwl-commands.h | 6 - trunk/drivers/net/wireless/iwlwifi/iwl-core.c | 10 +- trunk/drivers/net/wireless/iwlwifi/iwl-core.h | 14 + trunk/drivers/net/wireless/iwlwifi/iwl-csr.h | 14 +- .../net/wireless/iwlwifi/iwl-debugfs.c | 106 +- trunk/drivers/net/wireless/iwlwifi/iwl-dev.h | 57 +- .../drivers/net/wireless/iwlwifi/iwl-eeprom.h | 26 +- trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c | 1 - trunk/drivers/net/wireless/iwlwifi/iwl-led.c | 201 +- trunk/drivers/net/wireless/iwlwifi/iwl-led.h | 16 +- .../drivers/net/wireless/iwlwifi/iwl-legacy.c | 4 + .../net/wireless/iwlwifi/iwl3945-base.c | 8 +- .../net/wireless/iwmc3200wifi/cfg80211.c | 3 +- trunk/drivers/net/wireless/iwmc3200wifi/rx.c | 7 +- trunk/drivers/net/wireless/libertas/cfg.c | 6 +- trunk/drivers/net/wireless/mac80211_hwsim.c | 3 +- trunk/drivers/net/wireless/mwl8k.c | 456 +- trunk/drivers/net/wireless/rt2x00/rt2800.h | 6 - trunk/drivers/net/wireless/rt2x00/rt2800lib.c | 34 +- trunk/drivers/net/wireless/rt2x00/rt2800lib.h | 3 +- trunk/drivers/net/wireless/rt2x00/rt2800pci.c | 36 +- trunk/drivers/net/wireless/rt2x00/rt2x00dev.c | 5 +- trunk/drivers/net/wireless/rt2x00/rt73usb.c | 1 - trunk/drivers/net/wireless/rtlwifi/core.c | 3 +- trunk/drivers/net/wireless/rtlwifi/pci.c | 11 +- trunk/drivers/net/wireless/wl1251/rx.c | 3 +- trunk/drivers/net/wireless/wl12xx/rx.c | 2 +- trunk/drivers/net/xen-netfront.c | 96 +- trunk/drivers/rapidio/rio-scan.c | 2 - trunk/drivers/rtc/Kconfig | 12 + trunk/drivers/rtc/interface.c | 61 +- trunk/fs/dcache.c | 4 +- trunk/include/asm-generic/vmlinux.lds.h | 7 - trunk/include/linux/Kbuild | 1 - trunk/include/linux/audit.h | 2 - trunk/include/linux/caif/Kbuild | 2 - trunk/include/linux/cpu_rmap.h | 73 - trunk/include/linux/dccp.h | 2 + trunk/include/linux/gfp.h | 2 +- trunk/include/linux/if_link.h | 1 - trunk/include/linux/interrupt.h | 33 +- trunk/include/linux/ip_vs.h | 8 - trunk/include/linux/irqdesc.h | 3 - trunk/include/linux/kernel.h | 32 +- trunk/include/linux/kmemcheck.h | 2 +- trunk/include/linux/module.h | 27 - trunk/include/linux/moduleparam.h | 6 +- trunk/include/linux/mroute.h | 1 - trunk/include/linux/netdevice.h | 141 +- trunk/include/linux/netfilter.h | 27 +- trunk/include/linux/netfilter/Kbuild | 2 - .../linux/netfilter/nf_conntrack_snmp.h | 9 - .../linux/netfilter/nfnetlink_conntrack.h | 9 - trunk/include/linux/netfilter/x_tables.h | 3 +- trunk/include/linux/netfilter/xt_AUDIT.h | 30 - trunk/include/linux/netfilter/xt_CT.h | 12 +- trunk/include/linux/netfilter/xt_NFQUEUE.h | 6 - .../include/linux/netfilter/xt_TCPOPTSTRIP.h | 4 +- trunk/include/linux/netfilter/xt_TPROXY.h | 10 +- trunk/include/linux/netfilter/xt_cluster.h | 10 +- trunk/include/linux/netfilter/xt_comment.h | 2 +- trunk/include/linux/netfilter/xt_connlimit.h | 16 +- trunk/include/linux/netfilter/xt_conntrack.h | 15 - 
trunk/include/linux/netfilter/xt_quota.h | 8 +- trunk/include/linux/netfilter/xt_socket.h | 2 - trunk/include/linux/netfilter/xt_time.h | 16 +- trunk/include/linux/netfilter/xt_u32.h | 18 +- .../linux/netfilter_bridge/ebt_802_3.h | 26 +- .../linux/netfilter_bridge/ebt_among.h | 4 +- .../include/linux/netfilter_bridge/ebt_arp.h | 6 +- trunk/include/linux/netfilter_bridge/ebt_ip.h | 14 +- .../include/linux/netfilter_bridge/ebt_ip6.h | 25 +- .../linux/netfilter_bridge/ebt_limit.h | 10 +- .../include/linux/netfilter_bridge/ebt_log.h | 8 +- .../linux/netfilter_bridge/ebt_mark_m.h | 6 +- .../linux/netfilter_bridge/ebt_nflog.h | 12 +- .../linux/netfilter_bridge/ebt_pkttype.h | 6 +- .../include/linux/netfilter_bridge/ebt_stp.h | 26 +- .../include/linux/netfilter_bridge/ebt_ulog.h | 4 +- .../include/linux/netfilter_bridge/ebt_vlan.h | 10 +- .../linux/netfilter_ipv4/ipt_CLUSTERIP.h | 16 +- trunk/include/linux/netfilter_ipv4/ipt_ECN.h | 8 +- trunk/include/linux/netfilter_ipv4/ipt_SAME.h | 8 +- trunk/include/linux/netfilter_ipv4/ipt_TTL.h | 6 +- .../linux/netfilter_ipv4/ipt_addrtype.h | 16 +- trunk/include/linux/netfilter_ipv4/ipt_ah.h | 6 +- trunk/include/linux/netfilter_ipv4/ipt_ecn.h | 10 +- trunk/include/linux/netfilter_ipv4/ipt_ttl.h | 6 +- trunk/include/linux/netfilter_ipv6/ip6t_HL.h | 6 +- .../linux/netfilter_ipv6/ip6t_REJECT.h | 4 +- trunk/include/linux/netfilter_ipv6/ip6t_ah.h | 10 +- .../include/linux/netfilter_ipv6/ip6t_frag.h | 10 +- trunk/include/linux/netfilter_ipv6/ip6t_hl.h | 6 +- .../linux/netfilter_ipv6/ip6t_ipv6header.h | 8 +- trunk/include/linux/netfilter_ipv6/ip6t_mh.h | 6 +- .../include/linux/netfilter_ipv6/ip6t_opts.h | 12 +- trunk/include/linux/netfilter_ipv6/ip6t_rt.h | 13 +- trunk/include/linux/pkt_sched.h | 12 - trunk/include/linux/rtc.h | 4 + trunk/include/linux/skbuff.h | 11 +- trunk/include/linux/virtio_config.h | 5 +- trunk/include/net/bluetooth/hci_core.h | 1 - trunk/include/net/cfg80211.h | 3 +- trunk/include/net/dst.h | 117 +- trunk/include/net/dst_ops.h | 1 - trunk/include/net/flow.h | 3 +- trunk/include/net/inet_sock.h | 8 +- trunk/include/net/inetpeer.h | 13 +- trunk/include/net/ip_fib.h | 8 +- trunk/include/net/ip_vs.h | 295 +- trunk/include/net/mac80211.h | 20 +- trunk/include/net/net_namespace.h | 2 - trunk/include/net/netfilter/nf_conntrack.h | 23 +- .../net/netfilter/nf_conntrack_ecache.h | 12 +- .../net/netfilter/nf_conntrack_extend.h | 10 - .../net/netfilter/nf_conntrack_helper.h | 6 - .../net/netfilter/nf_conntrack_l3proto.h | 2 +- .../net/netfilter/nf_conntrack_timestamp.h | 65 - trunk/include/net/netfilter/nf_nat.h | 6 - trunk/include/net/netfilter/nf_nat_core.h | 4 +- trunk/include/net/netns/conntrack.h | 4 +- trunk/include/net/netns/ip_vs.h | 143 - trunk/include/net/netns/ipv4.h | 1 + trunk/include/net/protocol.h | 4 +- trunk/include/net/route.h | 6 - trunk/include/net/sch_generic.h | 67 +- trunk/include/net/sock.h | 4 +- trunk/include/net/tcp.h | 2 +- trunk/include/net/udp.h | 2 +- trunk/kernel/audit.c | 2 - trunk/kernel/irq/manage.c | 82 - trunk/kernel/params.c | 65 +- trunk/kernel/perf_event.c | 46 +- trunk/kernel/sched_fair.c | 78 +- trunk/kernel/time/tick-sched.c | 7 +- trunk/lib/Kconfig | 4 - trunk/lib/Makefile | 2 - trunk/lib/cpu_rmap.c | 269 -- trunk/lib/textsearch.c | 10 +- trunk/net/8021q/vlan.c | 2 +- trunk/net/9p/trans_rdma.c | 1 + trunk/net/Kconfig | 6 - trunk/net/batman-adv/unicast.c | 5 +- trunk/net/batman-adv/vis.c | 14 +- trunk/net/bluetooth/hci_conn.c | 16 +- trunk/net/bluetooth/hci_core.c | 4 - trunk/net/bluetooth/hci_event.c | 9 +- 
trunk/net/bluetooth/l2cap.c | 84 +- trunk/net/bluetooth/rfcomm/core.c | 3 +- trunk/net/bridge/br_if.c | 4 +- trunk/net/bridge/br_private.h | 2 +- trunk/net/bridge/netfilter/ebt_ip6.c | 46 +- trunk/net/bridge/netfilter/ebtables.c | 1 - trunk/net/caif/cfcnfg.c | 2 + trunk/net/caif/cfdgml.c | 1 + trunk/net/caif/cfserl.c | 1 + trunk/net/caif/cfutill.c | 2 +- trunk/net/caif/cfveil.c | 2 +- trunk/net/core/dev.c | 255 +- trunk/net/core/dst.c | 39 - trunk/net/core/ethtool.c | 4 +- trunk/net/core/filter.c | 6 +- trunk/net/core/neighbour.c | 13 +- trunk/net/core/net-sysfs.c | 17 +- trunk/net/core/pktgen.c | 234 +- trunk/net/core/rtnetlink.c | 49 +- trunk/net/core/skbuff.c | 13 +- trunk/net/dcb/dcbnl.c | 13 +- trunk/net/decnet/dn_route.c | 18 +- trunk/net/decnet/dn_table.c | 1 + trunk/net/dsa/dsa.c | 2 +- trunk/net/econet/af_econet.c | 4 +- trunk/net/ipv4/Kconfig | 4 +- trunk/net/ipv4/af_inet.c | 18 +- trunk/net/ipv4/arp.c | 11 +- trunk/net/ipv4/fib_rules.c | 10 +- trunk/net/ipv4/fib_semantics.c | 22 +- trunk/net/ipv4/inetpeer.c | 3 +- trunk/net/ipv4/ip_input.c | 2 +- trunk/net/ipv4/ipmr.c | 46 - trunk/net/ipv4/netfilter/Kconfig | 3 +- trunk/net/ipv4/netfilter/arp_tables.c | 2 - trunk/net/ipv4/netfilter/ip_tables.c | 2 - trunk/net/ipv4/netfilter/ipt_CLUSTERIP.c | 7 +- trunk/net/ipv4/netfilter/ipt_LOG.c | 3 +- trunk/net/ipv4/netfilter/iptable_mangle.c | 2 +- .../nf_conntrack_l3proto_ipv4_compat.c | 17 +- trunk/net/ipv4/netfilter/nf_nat_amanda.c | 8 +- trunk/net/ipv4/netfilter/nf_nat_core.c | 33 +- trunk/net/ipv4/netfilter/nf_nat_snmp_basic.c | 9 +- trunk/net/ipv4/raw.c | 19 - trunk/net/ipv4/route.c | 97 +- trunk/net/ipv4/tcp.c | 2 +- trunk/net/ipv4/tcp_input.c | 2 +- trunk/net/ipv4/tcp_ipv4.c | 1 + trunk/net/ipv4/udp.c | 2 +- trunk/net/ipv4/xfrm4_policy.c | 4 - trunk/net/ipv6/addrconf.c | 81 +- trunk/net/ipv6/af_inet6.c | 2 +- trunk/net/ipv6/netfilter/ip6_tables.c | 2 - trunk/net/ipv6/netfilter/ip6t_LOG.c | 3 +- trunk/net/ipv6/netfilter/nf_conntrack_reasm.c | 3 +- trunk/net/ipv6/raw.c | 14 +- trunk/net/ipv6/route.c | 57 +- trunk/net/ipv6/sit.c | 23 +- trunk/net/ipv6/udp.c | 2 +- trunk/net/ipv6/xfrm6_policy.c | 8 - trunk/net/mac80211/agg-rx.c | 7 +- trunk/net/mac80211/agg-tx.c | 23 +- trunk/net/mac80211/driver-ops.h | 6 +- trunk/net/mac80211/driver-trace.h | 213 +- trunk/net/mac80211/ibss.c | 3 +- trunk/net/mac80211/main.c | 1 - trunk/net/mac80211/mesh.c | 4 +- trunk/net/mac80211/mlme.c | 16 +- trunk/net/mac80211/rx.c | 27 +- trunk/net/mac80211/scan.c | 3 +- trunk/net/mac80211/sta_info.h | 2 - trunk/net/mac80211/tx.c | 20 +- trunk/net/netfilter/Kconfig | 43 +- trunk/net/netfilter/Makefile | 4 - trunk/net/netfilter/core.c | 20 +- trunk/net/netfilter/ipvs/ip_vs_app.c | 98 +- trunk/net/netfilter/ipvs/ip_vs_conn.c | 195 +- trunk/net/netfilter/ipvs/ip_vs_core.c | 376 +- trunk/net/netfilter/ipvs/ip_vs_ctl.c | 891 ++-- trunk/net/netfilter/ipvs/ip_vs_est.c | 134 +- trunk/net/netfilter/ipvs/ip_vs_ftp.c | 61 +- trunk/net/netfilter/ipvs/ip_vs_lblc.c | 67 +- trunk/net/netfilter/ipvs/ip_vs_lblcr.c | 72 +- trunk/net/netfilter/ipvs/ip_vs_nfct.c | 6 +- trunk/net/netfilter/ipvs/ip_vs_pe.c | 17 +- trunk/net/netfilter/ipvs/ip_vs_pe_sip.c | 3 - trunk/net/netfilter/ipvs/ip_vs_proto.c | 129 +- trunk/net/netfilter/ipvs/ip_vs_proto_ah_esp.c | 45 +- trunk/net/netfilter/ipvs/ip_vs_proto_sctp.c | 153 +- trunk/net/netfilter/ipvs/ip_vs_proto_tcp.c | 142 +- trunk/net/netfilter/ipvs/ip_vs_proto_udp.c | 110 +- trunk/net/netfilter/ipvs/ip_vs_sync.c | 1239 ++---- trunk/net/netfilter/ipvs/ip_vs_xmit.c | 26 +- 
trunk/net/netfilter/nf_conntrack_broadcast.c | 82 - trunk/net/netfilter/nf_conntrack_core.c | 57 +- trunk/net/netfilter/nf_conntrack_expect.c | 34 +- trunk/net/netfilter/nf_conntrack_extend.c | 11 +- trunk/net/netfilter/nf_conntrack_helper.c | 20 +- trunk/net/netfilter/nf_conntrack_netbios_ns.c | 74 +- trunk/net/netfilter/nf_conntrack_netlink.c | 47 +- trunk/net/netfilter/nf_conntrack_proto.c | 24 +- trunk/net/netfilter/nf_conntrack_proto_dccp.c | 3 - trunk/net/netfilter/nf_conntrack_proto_sctp.c | 1 - trunk/net/netfilter/nf_conntrack_proto_tcp.c | 14 +- trunk/net/netfilter/nf_conntrack_snmp.c | 77 - trunk/net/netfilter/nf_conntrack_standalone.c | 45 +- trunk/net/netfilter/nf_conntrack_timestamp.c | 120 - trunk/net/netfilter/nf_log.c | 6 +- trunk/net/netfilter/nf_queue.c | 82 +- trunk/net/netfilter/nfnetlink_log.c | 6 +- trunk/net/netfilter/nfnetlink_queue.c | 22 +- trunk/net/netfilter/x_tables.c | 98 +- trunk/net/netfilter/xt_AUDIT.c | 204 - trunk/net/netfilter/xt_CLASSIFY.c | 36 +- trunk/net/netfilter/xt_IDLETIMER.c | 2 - trunk/net/netfilter/xt_LED.c | 2 - trunk/net/netfilter/xt_NFQUEUE.c | 34 +- trunk/net/netfilter/xt_connlimit.c | 62 +- trunk/net/netfilter/xt_conntrack.c | 75 +- trunk/net/netfilter/xt_cpu.c | 2 - trunk/net/netfilter/xt_iprange.c | 18 +- trunk/net/netfilter/xt_ipvs.c | 2 +- trunk/net/packet/af_packet.c | 7 +- trunk/net/rds/rds.h | 1 + trunk/net/sched/Kconfig | 17 +- trunk/net/sched/Makefile | 1 - trunk/net/sched/act_api.c | 46 +- trunk/net/sched/act_csum.c | 2 +- trunk/net/sched/act_gact.c | 8 +- trunk/net/sched/act_ipt.c | 16 +- trunk/net/sched/act_mirred.c | 4 +- trunk/net/sched/act_nat.c | 2 +- trunk/net/sched/act_pedit.c | 10 +- trunk/net/sched/act_police.c | 9 +- trunk/net/sched/act_simple.c | 10 +- trunk/net/sched/act_skbedit.c | 8 +- trunk/net/sched/cls_api.c | 33 +- trunk/net/sched/cls_basic.c | 17 +- trunk/net/sched/cls_cgroup.c | 8 +- trunk/net/sched/cls_flow.c | 6 +- trunk/net/sched/cls_fw.c | 38 +- trunk/net/sched/cls_route.c | 126 +- trunk/net/sched/cls_rsvp.h | 95 +- trunk/net/sched/cls_tcindex.c | 2 +- trunk/net/sched/cls_u32.c | 77 +- trunk/net/sched/em_cmp.c | 47 +- trunk/net/sched/em_meta.c | 44 +- trunk/net/sched/em_nbyte.c | 3 +- trunk/net/sched/em_text.c | 3 +- trunk/net/sched/em_u32.c | 2 +- trunk/net/sched/ematch.c | 37 +- trunk/net/sched/sch_api.c | 169 +- trunk/net/sched/sch_atm.c | 16 +- trunk/net/sched/sch_cbq.c | 365 +- trunk/net/sched/sch_drr.c | 2 +- trunk/net/sched/sch_dsmark.c | 23 +- trunk/net/sched/sch_fifo.c | 27 +- trunk/net/sched/sch_generic.c | 40 +- trunk/net/sched/sch_gred.c | 85 +- trunk/net/sched/sch_hfsc.c | 39 +- trunk/net/sched/sch_htb.c | 118 +- trunk/net/sched/sch_mq.c | 1 + trunk/net/sched/sch_mqprio.c | 416 -- trunk/net/sched/sch_multiq.c | 10 +- trunk/net/sched/sch_netem.c | 11 +- trunk/net/sched/sch_prio.c | 36 +- trunk/net/sched/sch_red.c | 72 +- trunk/net/sched/sch_sfq.c | 71 +- trunk/net/sched/sch_tbf.c | 41 +- trunk/net/sched/sch_teql.c | 39 +- trunk/net/unix/af_unix.c | 66 +- trunk/net/wanrouter/wanmain.c | 2 + trunk/net/wireless/reg.c | 6 +- trunk/net/wireless/util.c | 36 +- trunk/net/wireless/wext-compat.c | 5 +- trunk/security/keys/Makefile | 4 +- .../keys/{encrypted.c => encrypted_defined.c} | 3 +- .../keys/{encrypted.h => encrypted_defined.h} | 0 .../keys/{trusted.c => trusted_defined.c} | 3 +- .../keys/{trusted.h => trusted_defined.h} | 0 trunk/security/selinux/ss/conditional.c | 2 +- trunk/security/selinux/ss/policydb.c | 4 +- trunk/tools/perf/Makefile | 9 +- trunk/tools/perf/builtin-annotate.c | 6 +- 
trunk/tools/perf/builtin-kmem.c | 4 +- trunk/tools/perf/builtin-lock.c | 6 +- trunk/tools/perf/builtin-record.c | 2 +- trunk/tools/perf/builtin-report.c | 2 +- trunk/tools/perf/builtin-sched.c | 20 +- trunk/tools/perf/builtin-script.c | 6 +- trunk/tools/perf/builtin-stat.c | 4 +- trunk/tools/perf/builtin-test.c | 54 +- trunk/tools/perf/builtin-top.c | 9 +- trunk/tools/perf/util/event.c | 5 +- trunk/tools/perf/util/header.c | 4 +- trunk/tools/perf/util/hist.c | 17 +- trunk/tools/perf/util/include/linux/bitops.h | 1 - trunk/tools/perf/util/map.c | 3 +- trunk/tools/perf/util/parse-events.c | 2 +- trunk/tools/perf/util/parse-events.h | 2 +- trunk/tools/perf/util/probe-event.c | 2 +- trunk/tools/perf/util/session.c | 28 +- trunk/tools/perf/util/svghelper.c | 9 +- trunk/tools/perf/util/symbol.c | 16 +- trunk/tools/perf/util/types.h | 10 +- trunk/tools/perf/util/ui/browsers/hists.c | 2 +- trunk/tools/perf/util/ui/browsers/map.c | 5 +- trunk/tools/perf/util/values.c | 10 +- 520 files changed, 8474 insertions(+), 18078 deletions(-) delete mode 100644 trunk/Documentation/ABI/testing/sysfs-platform-at91 delete mode 100644 trunk/drivers/net/can/softing/Kconfig delete mode 100644 trunk/drivers/net/can/softing/Makefile delete mode 100644 trunk/drivers/net/can/softing/softing.h delete mode 100644 trunk/drivers/net/can/softing/softing_cs.c delete mode 100644 trunk/drivers/net/can/softing/softing_fw.c delete mode 100644 trunk/drivers/net/can/softing/softing_main.c delete mode 100644 trunk/drivers/net/can/softing/softing_platform.h delete mode 100644 trunk/drivers/net/wireless/iwlwifi/iwl-2000.c delete mode 100644 trunk/include/linux/caif/Kbuild delete mode 100644 trunk/include/linux/cpu_rmap.h delete mode 100644 trunk/include/linux/netfilter/nf_conntrack_snmp.h delete mode 100644 trunk/include/linux/netfilter/xt_AUDIT.h delete mode 100644 trunk/include/net/netfilter/nf_conntrack_timestamp.h delete mode 100644 trunk/include/net/netns/ip_vs.h delete mode 100644 trunk/lib/cpu_rmap.c delete mode 100644 trunk/net/netfilter/nf_conntrack_broadcast.c delete mode 100644 trunk/net/netfilter/nf_conntrack_snmp.c delete mode 100644 trunk/net/netfilter/nf_conntrack_timestamp.c delete mode 100644 trunk/net/netfilter/xt_AUDIT.c delete mode 100644 trunk/net/sched/sch_mqprio.c rename trunk/security/keys/{encrypted.c => encrypted_defined.c} (99%) rename trunk/security/keys/{encrypted.h => encrypted_defined.h} (100%) rename trunk/security/keys/{trusted.c => trusted_defined.c} (99%) rename trunk/security/keys/{trusted.h => trusted_defined.h} (100%) diff --git a/[refs] b/[refs] index 2f9d647f215f..0b07cfcc260b 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 6b28ff3be829a851378551245fd6b3f9bf93b0ad +refs/heads/master: 5c77d8bb8aeb4ec6804b6c32061109ba2ea6988d diff --git a/trunk/Documentation/ABI/testing/sysfs-platform-at91 b/trunk/Documentation/ABI/testing/sysfs-platform-at91 deleted file mode 100644 index 4cc6a865ae66..000000000000 --- a/trunk/Documentation/ABI/testing/sysfs-platform-at91 +++ /dev/null @@ -1,25 +0,0 @@ -What: /sys/devices/platform/at91_can/net//mb0_id -Date: January 2011 -KernelVersion: 2.6.38 -Contact: Marc Kleine-Budde -Description: - Value representing the can_id of mailbox 0. - - Default: 0x7ff (standard frame) - - Due to a chip bug (errata 50.2.6.3 & 50.3.5.3 in - "AT91SAM9263 Preliminary 6249H-ATARM-27-Jul-09") the - contents of mailbox 0 may be send under certain - conditions (even if disabled or in rx mode). 
- - The workaround in the errata suggests not to use the - mailbox and load it with an unused identifier. - - In order to use an extended can_id add the - CAN_EFF_FLAG (0x80000000U) to the can_id. Example: - - - standard id 0x7ff: - echo 0x7ff > /sys/class/net/can0/mb0_id - - - extended id 0x1fffffff: - echo 0x9fffffff > /sys/class/net/can0/mb0_id diff --git a/trunk/Documentation/DocBook/device-drivers.tmpl b/trunk/Documentation/DocBook/device-drivers.tmpl index 36f63d4a0a06..35447e081736 100644 --- a/trunk/Documentation/DocBook/device-drivers.tmpl +++ b/trunk/Documentation/DocBook/device-drivers.tmpl @@ -217,8 +217,8 @@ X!Isound/sound_firmware.c 16x50 UART Driver !Iinclude/linux/serial_core.h -!Edrivers/tty/serial/serial_core.c -!Edrivers/tty/serial/8250.c +!Edrivers/serial/serial_core.c +!Edrivers/serial/8250.c diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index ccb6048415b2..b959659c5df4 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -603,10 +603,3 @@ Why: The adm9240, w83792d and w83793 hardware monitoring drivers have Who: Jean Delvare ---------------------------- - -What: xt_connlimit rev 0 -When: 2012 -Who: Jan Engelhardt -Files: net/netfilter/xt_connlimit.c - ----------------------------- diff --git a/trunk/Documentation/networking/bonding.txt b/trunk/Documentation/networking/bonding.txt index 25d2f4141d27..5dc638791d97 100644 --- a/trunk/Documentation/networking/bonding.txt +++ b/trunk/Documentation/networking/bonding.txt @@ -49,8 +49,7 @@ Table of Contents 3.3 Configuring Bonding Manually with Ifenslave 3.3.1 Configuring Multiple Bonds Manually 3.4 Configuring Bonding Manually via Sysfs -3.5 Configuration with Interfaces Support -3.6 Overriding Configuration for Special Cases +3.5 Overriding Configuration for Special Cases 4. Querying Bonding Configuration 4.1 Bonding Configuration @@ -162,8 +161,8 @@ onwards) do not have /usr/include/linux symbolically linked to the default kernel source include directory. SECOND IMPORTANT NOTE: - If you plan to configure bonding using sysfs or using the -/etc/network/interfaces file, you do not need to use ifenslave. + If you plan to configure bonding using sysfs, you do not need +to use ifenslave. 2. Bonding Driver Options ========================= @@ -780,26 +779,22 @@ resend_igmp You can configure bonding using either your distro's network initialization scripts, or manually using either ifenslave or the -sysfs interface. Distros generally use one of three packages for the -network initialization scripts: initscripts, sysconfig or interfaces. -Recent versions of these packages have support for bonding, while older +sysfs interface. Distros generally use one of two packages for the +network initialization scripts: initscripts or sysconfig. Recent +versions of these packages have support for bonding, while older versions do not. We will first describe the options for configuring bonding for -distros using versions of initscripts, sysconfig and interfaces with full -or partial support for bonding, then provide information on enabling +distros using versions of initscripts and sysconfig with full or +partial support for bonding, then provide information on enabling bonding without support from the network initialization scripts (i.e., older versions of initscripts or sysconfig). - If you're unsure whether your distro uses sysconfig, -initscripts or interfaces, or don't know if it's new enough, have no fear. 
+ If you're unsure whether your distro uses sysconfig or +initscripts, or don't know if it's new enough, have no fear. Determining this is fairly straightforward. - First, look for a file called interfaces in /etc/network directory. -If this file is present in your system, then your system use interfaces. See -Configuration with Interfaces Support. - - Else, issue the command: + First, issue the command: $ rpm -qf /sbin/ifup @@ -1332,62 +1327,8 @@ echo 2000 > /sys/class/net/bond1/bonding/arp_interval echo +eth2 > /sys/class/net/bond1/bonding/slaves echo +eth3 > /sys/class/net/bond1/bonding/slaves -3.5 Configuration with Interfaces Support ------------------------------------------ - - This section applies to distros which use /etc/network/interfaces file -to describe network interface configuration, most notably Debian and it's -derivatives. - - The ifup and ifdown commands on Debian don't support bonding out of -the box. The ifenslave-2.6 package should be installed to provide bonding -support. Once installed, this package will provide bond-* options to be used -into /etc/network/interfaces. - - Note that ifenslave-2.6 package will load the bonding module and use -the ifenslave command when appropriate. - -Example Configurations ----------------------- - -In /etc/network/interfaces, the following stanza will configure bond0, in -active-backup mode, with eth0 and eth1 as slaves. - -auto bond0 -iface bond0 inet dhcp - bond-slaves eth0 eth1 - bond-mode active-backup - bond-miimon 100 - bond-primary eth0 eth1 - -If the above configuration doesn't work, you might have a system using -upstart for system startup. This is most notably true for recent -Ubuntu versions. The following stanza in /etc/network/interfaces will -produce the same result on those systems. - -auto bond0 -iface bond0 inet dhcp - bond-slaves none - bond-mode active-backup - bond-miimon 100 - -auto eth0 -iface eth0 inet manual - bond-master bond0 - bond-primary eth0 eth1 - -auto eth1 -iface eth1 inet manual - bond-master bond0 - bond-primary eth0 eth1 - -For a full list of bond-* supported options in /etc/network/interfaces and some -more advanced examples tailored to you particular distros, see the files in -/usr/share/doc/ifenslave-2.6. - -3.6 Overriding Configuration for Special Cases +3.5 Overriding Configuration for Special Cases ---------------------------------------------- - When using the bonding driver, the physical port which transmits a frame is typically selected by the bonding driver, and is not relevant to the user or system administrator. 
The output port is simply selected using the policies of diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 9d12977b6baf..55592f8b672c 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3327,6 +3327,7 @@ F: drivers/net/wimax/i2400m/ F: include/linux/wimax/i2400m.h INTEL WIRELESS WIFI LINK (iwlwifi) +M: Reinette Chatre M: Wey-Yi Guy M: Intel Linux Wireless L: linux-wireless@vger.kernel.org @@ -3673,28 +3674,6 @@ F: include/linux/key-type.h F: include/keys/ F: security/keys/ -KEYS-TRUSTED -M: David Safford -M: Mimi Zohar -L: linux-security-module@vger.kernel.org -L: keyrings@linux-nfs.org -S: Supported -F: Documentation/keys-trusted-encrypted.txt -F: include/keys/trusted-type.h -F: security/keys/trusted.c -F: security/keys/trusted.h - -KEYS-ENCRYPTED -M: Mimi Zohar -M: David Safford -L: linux-security-module@vger.kernel.org -L: keyrings@linux-nfs.org -S: Supported -F: Documentation/keys-trusted-encrypted.txt -F: include/keys/encrypted-type.h -F: security/keys/encrypted.c -F: security/keys/encrypted.h - KGDB / KDB /debug_core M: Jason Wessel W: http://kgdb.wiki.kernel.org/ diff --git a/trunk/arch/arm/mach-omap1/Kconfig b/trunk/arch/arm/mach-omap1/Kconfig index e0a028161dde..8d2f2daba0c0 100644 --- a/trunk/arch/arm/mach-omap1/Kconfig +++ b/trunk/arch/arm/mach-omap1/Kconfig @@ -9,7 +9,6 @@ config ARCH_OMAP730 depends on ARCH_OMAP1 bool "OMAP730 Based System" select CPU_ARM926T - select OMAP_MPU_TIMER select ARCH_OMAP_OTG config ARCH_OMAP850 @@ -23,7 +22,6 @@ config ARCH_OMAP15XX default y bool "OMAP15xx Based System" select CPU_ARM925T - select OMAP_MPU_TIMER config ARCH_OMAP16XX depends on ARCH_OMAP1 diff --git a/trunk/arch/arm/mach-omap1/Makefile b/trunk/arch/arm/mach-omap1/Makefile index ba6009f27677..6ee19504845f 100644 --- a/trunk/arch/arm/mach-omap1/Makefile +++ b/trunk/arch/arm/mach-omap1/Makefile @@ -3,11 +3,12 @@ # # Common support -obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o +obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o obj-y += clock.o clock_data.o opp_data.o obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o +obj-$(CONFIG_OMAP_MPU_TIMER) += time.o obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o # Power Management diff --git a/trunk/arch/arm/mach-omap1/time.c b/trunk/arch/arm/mach-omap1/time.c index f83fc335c613..ed7a61ff916a 100644 --- a/trunk/arch/arm/mach-omap1/time.c +++ b/trunk/arch/arm/mach-omap1/time.c @@ -44,21 +44,16 @@ #include #include #include -#include #include #include #include #include -#include - #include #include #include -#ifdef CONFIG_OMAP_MPU_TIMER - #define OMAP_MPU_TIMER_BASE OMAP_MPU_TIMER1_BASE #define OMAP_MPU_TIMER_OFFSET 0x100 @@ -72,7 +67,7 @@ typedef struct { ((volatile omap_mpu_timer_regs_t*)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE + \ (n)*OMAP_MPU_TIMER_OFFSET)) -static inline unsigned long notrace omap_mpu_timer_read(int nr) +static inline unsigned long omap_mpu_timer_read(int nr) { volatile omap_mpu_timer_regs_t* timer = omap_mpu_timer_base(nr); return timer->read_tim; @@ -217,32 +212,6 @@ static struct clocksource clocksource_mpu = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static DEFINE_CLOCK_DATA(cd); - -static inline unsigned long long notrace _omap_mpu_sched_clock(void) -{ - u32 cyc = mpu_read(&clocksource_mpu); - return cyc_to_sched_clock(&cd, cyc, (u32)~0); -} - -#ifndef CONFIG_OMAP_32K_TIMER -unsigned long long notrace sched_clock(void) -{ - return _omap_mpu_sched_clock(); -} -#else -static unsigned long long notrace omap_mpu_sched_clock(void) -{ - return _omap_mpu_sched_clock(); -} -#endif - 
-static void notrace mpu_update_sched_clock(void) -{ - u32 cyc = mpu_read(&clocksource_mpu); - update_sched_clock(&cd, cyc, (u32)~0); -} - static void __init omap_init_clocksource(unsigned long rate) { static char err[] __initdata = KERN_ERR @@ -250,13 +219,17 @@ static void __init omap_init_clocksource(unsigned long rate) setup_irq(INT_TIMER2, &omap_mpu_timer2_irq); omap_mpu_timer_start(1, ~0, 1); - init_sched_clock(&cd, mpu_update_sched_clock, 32, rate); if (clocksource_register_hz(&clocksource_mpu, rate)) printk(err, clocksource_mpu.name); } -static void __init omap_mpu_timer_init(void) +/* + * --------------------------------------------------------------------------- + * Timer initialization + * --------------------------------------------------------------------------- + */ +static void __init omap_timer_init(void) { struct clk *ck_ref = clk_get(NULL, "ck_ref"); unsigned long rate; @@ -273,66 +246,6 @@ static void __init omap_mpu_timer_init(void) omap_init_clocksource(rate); } -#else -static inline void omap_mpu_timer_init(void) -{ - pr_err("Bogus timer, should not happen\n"); -} -#endif /* CONFIG_OMAP_MPU_TIMER */ - -#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER) -static unsigned long long (*preferred_sched_clock)(void); - -unsigned long long notrace sched_clock(void) -{ - if (!preferred_sched_clock) - return 0; - - return preferred_sched_clock(); -} - -static inline void preferred_sched_clock_init(bool use_32k_sched_clock) -{ - if (use_32k_sched_clock) - preferred_sched_clock = omap_32k_sched_clock; - else - preferred_sched_clock = omap_mpu_sched_clock; -} -#else -static inline void preferred_sched_clock_init(bool use_32k_sched_clcok) -{ -} -#endif - -static inline int omap_32k_timer_usable(void) -{ - int res = false; - - if (cpu_is_omap730() || cpu_is_omap15xx()) - return res; - -#ifdef CONFIG_OMAP_32K_TIMER - res = omap_32k_timer_init(); -#endif - - return res; -} - -/* - * --------------------------------------------------------------------------- - * Timer initialization - * --------------------------------------------------------------------------- - */ -static void __init omap_timer_init(void) -{ - if (omap_32k_timer_usable()) { - preferred_sched_clock_init(1); - } else { - omap_mpu_timer_init(); - preferred_sched_clock_init(0); - } -} - struct sys_timer omap_timer = { .init = omap_timer_init, }; diff --git a/trunk/arch/arm/mach-omap1/timer32k.c b/trunk/arch/arm/mach-omap1/timer32k.c index 13d7b8f145bd..20cfbcc6c60c 100644 --- a/trunk/arch/arm/mach-omap1/timer32k.c +++ b/trunk/arch/arm/mach-omap1/timer32k.c @@ -52,9 +52,10 @@ #include #include #include -#include #include +struct sys_timer omap_timer; + /* * --------------------------------------------------------------------------- * 32KHz OS timer @@ -180,14 +181,14 @@ static __init void omap_init_32k_timer(void) * Timer initialization * --------------------------------------------------------------------------- */ -bool __init omap_32k_timer_init(void) +static void __init omap_timer_init(void) { - omap_init_clocksource_32k(); - #ifdef CONFIG_OMAP_DM_TIMER omap_dm_timer_init(); #endif omap_init_32k_timer(); - - return true; } + +struct sys_timer omap_timer = { + .init = omap_timer_init, +}; diff --git a/trunk/arch/arm/mach-omap2/board-cm-t3517.c b/trunk/arch/arm/mach-omap2/board-cm-t3517.c index 8f9a64d650ee..5b0c77732dfc 100644 --- a/trunk/arch/arm/mach-omap2/board-cm-t3517.c +++ b/trunk/arch/arm/mach-omap2/board-cm-t3517.c @@ -124,9 +124,8 @@ static inline void cm_t3517_init_hecc(void) {} #if 
defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE) #define RTC_IO_GPIO (153) #define RTC_WR_GPIO (154) -#define RTC_RD_GPIO (53) +#define RTC_RD_GPIO (160) #define RTC_CS_GPIO (163) -#define RTC_CS_EN_GPIO (160) struct v3020_platform_data cm_t3517_v3020_pdata = { .use_gpio = 1, @@ -146,16 +145,6 @@ static struct platform_device cm_t3517_rtc_device = { static void __init cm_t3517_init_rtc(void) { - int err; - - err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en"); - if (err) { - pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err); - return; - } - - gpio_direction_output(RTC_CS_EN_GPIO, 1); - platform_device_register(&cm_t3517_rtc_device); } #else @@ -225,12 +214,12 @@ static struct mtd_partition cm_t3517_nand_partitions[] = { }, { .name = "linux", - .offset = MTDPART_OFS_APPEND, /* Offset = 0x2A0000 */ + .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */ .size = 32 * NAND_BLOCK_SIZE, }, { .name = "rootfs", - .offset = MTDPART_OFS_APPEND, /* Offset = 0x6A0000 */ + .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */ .size = MTDPART_SIZ_FULL, }, }; @@ -267,19 +256,11 @@ static void __init cm_t3517_init_irq(void) static struct omap_board_mux board_mux[] __initdata = { /* GPIO186 - Green LED */ OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), - - /* RTC GPIOs: */ - /* IO - GPIO153 */ + /* RTC GPIOs: IO, WR#, RD#, CS# */ OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), - /* WR# - GPIO154 */ OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), - /* RD# - GPIO53 */ - OMAP3_MUX(GPMC_NCS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), - /* CS# - GPIO163 */ - OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), - /* CS EN - GPIO160 */ OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), - + OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), /* HSUSB1 RESET */ OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), /* HSUSB2 RESET */ diff --git a/trunk/arch/arm/mach-omap2/board-devkit8000.c b/trunk/arch/arm/mach-omap2/board-devkit8000.c index e906e05bb41b..00bb1fc5e017 100644 --- a/trunk/arch/arm/mach-omap2/board-devkit8000.c +++ b/trunk/arch/arm/mach-omap2/board-devkit8000.c @@ -275,7 +275,8 @@ static struct twl4030_gpio_platform_data devkit8000_gpio_data = { .irq_base = TWL4030_GPIO_IRQ_BASE, .irq_end = TWL4030_GPIO_IRQ_END, .use_leds = true, - .pulldowns = BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13) + .pullups = BIT(1), + .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13) | BIT(15) | BIT(16) | BIT(17), .setup = devkit8000_twl_gpio_setup, }; diff --git a/trunk/arch/arm/mach-omap2/clock44xx_data.c b/trunk/arch/arm/mach-omap2/clock44xx_data.c index de9ec8ddd2ae..e8cb32fd7f13 100644 --- a/trunk/arch/arm/mach-omap2/clock44xx_data.c +++ b/trunk/arch/arm/mach-omap2/clock44xx_data.c @@ -34,6 +34,7 @@ #include "cm2_44xx.h" #include "cm-regbits-44xx.h" #include "prm44xx.h" +#include "prm44xx.h" #include "prm-regbits-44xx.h" #include "control.h" #include "scrm44xx.h" diff --git a/trunk/arch/arm/mach-omap2/clockdomain.c b/trunk/arch/arm/mach-omap2/clockdomain.c index 58e42f76603f..e20b98636ab4 100644 --- a/trunk/arch/arm/mach-omap2/clockdomain.c +++ b/trunk/arch/arm/mach-omap2/clockdomain.c @@ -423,12 +423,6 @@ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; - if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { - pr_err("clockdomain: %s/%s: %s: not yet implemented\n", - clkdm1->name, clkdm2->name, __func__); - return -EINVAL; - } - if (!clkdm1 || !clkdm2) return -EINVAL; @@ -464,12 +458,6 @@ int 
clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; - if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { - pr_err("clockdomain: %s/%s: %s: not yet implemented\n", - clkdm1->name, clkdm2->name, __func__); - return -EINVAL; - } - if (!clkdm1 || !clkdm2) return -EINVAL; @@ -512,12 +500,6 @@ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) if (!clkdm1 || !clkdm2) return -EINVAL; - if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { - pr_err("clockdomain: %s/%s: %s: not yet implemented\n", - clkdm1->name, clkdm2->name, __func__); - return -EINVAL; - } - cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) { pr_debug("clockdomain: hardware cannot set/clear wake up of " @@ -545,12 +527,6 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm) struct clkdm_dep *cd; u32 mask = 0; - if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) { - pr_err("clockdomain: %s: %s: not yet implemented\n", - clkdm->name, __func__); - return -EINVAL; - } - if (!clkdm) return -EINVAL; @@ -854,7 +830,8 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm) * dependency code and data for OMAP4. */ if (cpu_is_omap44xx()) { - pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name); + WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency " + "support is not yet implemented\n"); } else { if (atomic_read(&clkdm->usecount) > 0) _clkdm_add_autodeps(clkdm); @@ -895,7 +872,8 @@ void omap2_clkdm_deny_idle(struct clockdomain *clkdm) * dependency code and data for OMAP4. */ if (cpu_is_omap44xx()) { - pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name); + WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency " + "support is not yet implemented\n"); } else { if (atomic_read(&clkdm->usecount) > 0) _clkdm_del_autodeps(clkdm); diff --git a/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c b/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c index 10622c914abc..51920fc7fc52 100644 --- a/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c +++ b/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c @@ -30,6 +30,8 @@ #include "cm1_44xx.h" #include "cm2_44xx.h" +#include "cm1_44xx.h" +#include "cm2_44xx.h" #include "cm-regbits-44xx.h" #include "prm44xx.h" #include "prcm44xx.h" diff --git a/trunk/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c b/trunk/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c index cf600e22bf8e..d5233890370c 100644 --- a/trunk/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c +++ b/trunk/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c @@ -19,6 +19,7 @@ #include #include "powerdomain.h" +#include "prm-regbits-34xx.h" #include "prm.h" #include "prm-regbits-24xx.h" #include "prm-regbits-34xx.h" diff --git a/trunk/arch/arm/mach-omap2/timer-gp.c b/trunk/arch/arm/mach-omap2/timer-gp.c index 7b7c2683ae7b..4e48e786bec7 100644 --- a/trunk/arch/arm/mach-omap2/timer-gp.c +++ b/trunk/arch/arm/mach-omap2/timer-gp.c @@ -42,8 +42,6 @@ #include "timer-gp.h" -#include - /* MAX_GPTIMER_ID: number of GPTIMERs on the chip */ #define MAX_GPTIMER_ID 12 @@ -178,14 +176,10 @@ static void __init omap2_gp_clockevent_init(void) /* * When 32k-timer is enabled, don't use GPTimer for clocksource * instead, just leave default clocksource which uses the 32k - * sync counter. See clocksource setup in plat-omap/counter_32k.c + * sync counter. See clocksource setup in see plat-omap/common.c. 
*/ -static void __init omap2_gp_clocksource_init(void) -{ - omap_init_clocksource_32k(); -} - +static inline void __init omap2_gp_clocksource_init(void) {} #else /* * clocksource diff --git a/trunk/arch/arm/plat-omap/Kconfig b/trunk/arch/arm/plat-omap/Kconfig index b6333ae3f92a..18fe3cb195dc 100644 --- a/trunk/arch/arm/plat-omap/Kconfig +++ b/trunk/arch/arm/plat-omap/Kconfig @@ -144,9 +144,12 @@ config OMAP_IOMMU_DEBUG config OMAP_IOMMU_IVA2 bool +choice + prompt "System timer" + default OMAP_32K_TIMER if !ARCH_OMAP15XX + config OMAP_MPU_TIMER bool "Use mpu timer" - depends on ARCH_OMAP1 help Select this option if you want to use the OMAP mpu timer. This timer provides more intra-tick resolution than the 32KHz timer, @@ -155,7 +158,6 @@ config OMAP_MPU_TIMER config OMAP_32K_TIMER bool "Use 32KHz timer" depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS - default y if (ARCH_OMAP16XX || ARCH_OMAP2PLUS) help Select this option if you want to enable the OMAP 32KHz timer. This timer saves power compared to the OMAP_MPU_TIMER, and has @@ -163,6 +165,8 @@ config OMAP_32K_TIMER intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is currently only available for OMAP16XX, 24XX, 34XX and OMAP4. +endchoice + config OMAP3_L2_AUX_SECURE_SAVE_RESTORE bool "OMAP3 HS/EMU save and restore for L2 AUX control register" depends on ARCH_OMAP3 && PM diff --git a/trunk/arch/arm/plat-omap/counter_32k.c b/trunk/arch/arm/plat-omap/counter_32k.c index 862dda95d61d..ea4644021fb9 100644 --- a/trunk/arch/arm/plat-omap/counter_32k.c +++ b/trunk/arch/arm/plat-omap/counter_32k.c @@ -36,6 +36,8 @@ #define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410 +#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) + #include /* @@ -120,24 +122,12 @@ static DEFINE_CLOCK_DATA(cd); #define SC_MULT 4000000000u #define SC_SHIFT 17 -static inline unsigned long long notrace _omap_32k_sched_clock(void) +unsigned long long notrace sched_clock(void) { u32 cyc = clocksource_32k.read(&clocksource_32k); return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT); } -#ifndef CONFIG_OMAP_MPU_TIMER -unsigned long long notrace sched_clock(void) -{ - return _omap_32k_sched_clock(); -} -#else -unsigned long long notrace omap_32k_sched_clock(void) -{ - return _omap_32k_sched_clock(); -} -#endif - static void notrace omap_update_sched_clock(void) { u32 cyc = clocksource_32k.read(&clocksource_32k); @@ -170,7 +160,7 @@ void read_persistent_clock(struct timespec *ts) *ts = *tsp; } -int __init omap_init_clocksource_32k(void) +static int __init omap_init_clocksource_32k(void) { static char err[] __initdata = KERN_ERR "%s: can't register clocksource!\n"; @@ -205,3 +195,7 @@ int __init omap_init_clocksource_32k(void) } return 0; } +arch_initcall(omap_init_clocksource_32k); + +#endif /* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */ + diff --git a/trunk/arch/arm/plat-omap/dma.c b/trunk/arch/arm/plat-omap/dma.c index 85363084cc1a..c4b2b478b1a5 100644 --- a/trunk/arch/arm/plat-omap/dma.c +++ b/trunk/arch/arm/plat-omap/dma.c @@ -53,7 +53,7 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED }; #endif #define OMAP_DMA_ACTIVE 0x01 -#define OMAP2_DMA_CSR_CLEAR_MASK 0xffffffff +#define OMAP2_DMA_CSR_CLEAR_MASK 0xffe #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) @@ -1873,7 +1873,7 @@ static int omap2_dma_handle_ch(int ch) printk(KERN_INFO "DMA misaligned error with device %d\n", dma_chan[ch].dev_id); - p->dma_write(status, CSR, ch); + p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch); p->dma_write(1 << ch, IRQSTATUS_L0, ch); /* 
read back the register to flush the write */ p->dma_read(IRQSTATUS_L0, ch); @@ -1893,9 +1893,10 @@ static int omap2_dma_handle_ch(int ch) OMAP_DMA_CHAIN_INCQHEAD(chain_id); status = p->dma_read(CSR, ch); - p->dma_write(status, CSR, ch); } + p->dma_write(status, CSR, ch); + if (likely(dma_chan[ch].callback != NULL)) dma_chan[ch].callback(ch, status, dma_chan[ch].data); diff --git a/trunk/arch/arm/plat-omap/include/plat/common.h b/trunk/arch/arm/plat-omap/include/plat/common.h index 29b2afb4288f..6b8088ec74af 100644 --- a/trunk/arch/arm/plat-omap/include/plat/common.h +++ b/trunk/arch/arm/plat-omap/include/plat/common.h @@ -35,9 +35,6 @@ struct sys_timer; extern void omap_map_common_io(void); extern struct sys_timer omap_timer; -extern bool omap_32k_timer_init(void); -extern int __init omap_init_clocksource_32k(void); -extern unsigned long long notrace omap_32k_sched_clock(void); extern void omap_reserve(void); diff --git a/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c b/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c index b0dc8f7069cd..4dcf5f831e9d 100644 --- a/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -596,7 +596,6 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (left <= 0) left = period; record = 1; - event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) val = 0x80000000LL - left; diff --git a/trunk/arch/x86/include/asm/cacheflush.h b/trunk/arch/x86/include/asm/cacheflush.h index 62f084478f7e..63e35ec9075c 100644 --- a/trunk/arch/x86/include/asm/cacheflush.h +++ b/trunk/arch/x86/include/asm/cacheflush.h @@ -1,8 +1,48 @@ #ifndef _ASM_X86_CACHEFLUSH_H #define _ASM_X86_CACHEFLUSH_H +/* Keep includes the same across arches. */ +#include + /* Caches aren't brain-dead on the intel. 
*/ -#include +static inline void flush_cache_all(void) { } +static inline void flush_cache_mm(struct mm_struct *mm) { } +static inline void flush_cache_dup_mm(struct mm_struct *mm) { } +static inline void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) { } +static inline void flush_cache_page(struct vm_area_struct *vma, + unsigned long vmaddr, unsigned long pfn) { } +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +static inline void flush_dcache_page(struct page *page) { } +static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } +static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } +static inline void flush_icache_range(unsigned long start, + unsigned long end) { } +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) { } +static inline void flush_icache_user_range(struct vm_area_struct *vma, + struct page *page, + unsigned long addr, + unsigned long len) { } +static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } +static inline void flush_cache_vunmap(unsigned long start, + unsigned long end) { } + +static inline void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, const void *src, + unsigned long len) +{ + memcpy(dst, src, len); +} + +static inline void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, const void *src, + unsigned long len) +{ + memcpy(dst, src, len); +} #ifdef CONFIG_X86_PAT /* diff --git a/trunk/arch/x86/include/asm/cpu.h b/trunk/arch/x86/include/asm/cpu.h index 6e6e7558e702..4fab24de26b1 100644 --- a/trunk/arch/x86/include/asm/cpu.h +++ b/trunk/arch/x86/include/asm/cpu.h @@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int); DECLARE_PER_CPU(int, cpu_state); -int __cpuinit mwait_usable(const struct cpuinfo_x86 *); #endif /* _ASM_X86_CPU_H */ diff --git a/trunk/arch/x86/include/asm/jump_label.h b/trunk/arch/x86/include/asm/jump_label.h index 574dbc22893a..f52d42e80585 100644 --- a/trunk/arch/x86/include/asm/jump_label.h +++ b/trunk/arch/x86/include/asm/jump_label.h @@ -14,7 +14,7 @@ do { \ asm goto("1:" \ JUMP_LABEL_INITIAL_NOP \ - ".pushsection __jump_table, \"aw\" \n\t"\ + ".pushsection __jump_table, \"a\" \n\t"\ _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ ".popsection \n\t" \ : : "i" (key) : : label); \ diff --git a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c index ec2c19a7b8ef..7283e98deaae 100644 --- a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -45,7 +45,6 @@ static const struct _cache_table __cpuinitconst cache_table[] = { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ - { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ @@ -67,7 +66,6 @@ static const struct _cache_table __cpuinitconst cache_table[] = { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ - { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 
byte line size */ { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ @@ -89,7 +87,6 @@ static const struct _cache_table __cpuinitconst cache_table[] = { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ - { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ diff --git a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c index 6f8c5e9da97f..e12246ff5aa6 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -59,7 +59,6 @@ struct thermal_state { /* Callback to handle core threshold interrupts */ int (*platform_thermal_notify)(__u64 msr_val); -EXPORT_SYMBOL(platform_thermal_notify); static DEFINE_PER_CPU(struct thermal_state, thermal_state); diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index e764fc05d700..d8286ed54ffa 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -506,7 +505,7 @@ static void poll_idle(void) #define MWAIT_ECX_EXTENDED_INFO 0x01 #define MWAIT_EDX_C1 0xf0 -int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) +static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index 0cbe8c0b35ed..763df77343dd 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -1402,9 +1402,8 @@ static inline void mwait_play_dead(void) unsigned int highest_subcstate = 0; int i; void *mwait_ptr; - struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); - if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c))) + if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT)) return; if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH)) return; diff --git a/trunk/drivers/atm/idt77105.c b/trunk/drivers/atm/idt77105.c index 487a54739854..bca9cb89a118 100644 --- a/trunk/drivers/atm/idt77105.c +++ b/trunk/drivers/atm/idt77105.c @@ -151,7 +151,7 @@ static int fetch_stats(struct atm_dev *dev,struct idt77105_stats __user *arg,int spin_unlock_irqrestore(&idt77105_priv_lock, flags); if (arg == NULL) return 0; - return copy_to_user(arg, &stats, + return copy_to_user(arg, &PRIV(dev)->stats, sizeof(struct idt77105_stats)) ? 
-EFAULT : 0; } diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c index a126e614601f..949ed09c6361 100644 --- a/trunk/drivers/bluetooth/ath3k.c +++ b/trunk/drivers/bluetooth/ath3k.c @@ -47,40 +47,46 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 -static int ath3k_load_firmware(struct usb_device *udev, - const struct firmware *firmware) +struct ath3k_data { + struct usb_device *udev; + u8 *fw_data; + u32 fw_size; + u32 fw_sent; +}; + +static int ath3k_load_firmware(struct ath3k_data *data, + unsigned char *firmware, + int count) { u8 *send_buf; int err, pipe, len, size, sent = 0; - int count = firmware->size; - BT_DBG("udev %p", udev); + BT_DBG("ath3k %p udev %p", data, data->udev); - pipe = usb_sndctrlpipe(udev, 0); + pipe = usb_sndctrlpipe(data->udev, 0); - send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); - if (!send_buf) { - BT_ERR("Can't allocate memory chunk for firmware"); - return -ENOMEM; - } - - memcpy(send_buf, firmware->data, 20); - if ((err = usb_control_msg(udev, pipe, + if ((usb_control_msg(data->udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, 0, 0, - send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) { + firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) { BT_ERR("Can't change to loading configuration err"); - goto error; + return -EBUSY; } sent += 20; count -= 20; + send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); + if (!send_buf) { + BT_ERR("Can't allocate memory chunk for firmware"); + return -ENOMEM; + } + while (count) { size = min_t(uint, count, BULK_SIZE); - pipe = usb_sndbulkpipe(udev, 0x02); - memcpy(send_buf, firmware->data + sent, size); + pipe = usb_sndbulkpipe(data->udev, 0x02); + memcpy(send_buf, firmware + sent, size); - err = usb_bulk_msg(udev, pipe, send_buf, size, + err = usb_bulk_msg(data->udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { @@ -106,28 +112,57 @@ static int ath3k_probe(struct usb_interface *intf, { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); + struct ath3k_data *data; + int size; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->udev = udev; + if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { + kfree(data); return -EIO; } - if (ath3k_load_firmware(udev, firmware)) { + size = max_t(uint, firmware->size, 4096); + data->fw_data = kmalloc(size, GFP_KERNEL); + if (!data->fw_data) { release_firmware(firmware); - return -EIO; + kfree(data); + return -ENOMEM; } + + memcpy(data->fw_data, firmware->data, firmware->size); + data->fw_size = firmware->size; + data->fw_sent = 0; release_firmware(firmware); + usb_set_intfdata(intf, data); + if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) { + usb_set_intfdata(intf, NULL); + kfree(data->fw_data); + kfree(data); + return -EIO; + } + return 0; } static void ath3k_disconnect(struct usb_interface *intf) { + struct ath3k_data *data = usb_get_intfdata(intf); + BT_DBG("ath3k_disconnect intf %p", intf); + + kfree(data->fw_data); + kfree(data); } static struct usb_driver ath3k_driver = { diff --git a/trunk/drivers/char/tpm/tpm.c b/trunk/drivers/char/tpm/tpm.c index 36e0fa161c2b..1f46f1cd9225 100644 --- a/trunk/drivers/char/tpm/tpm.c +++ b/trunk/drivers/char/tpm/tpm.c @@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, tpm_protected_ordinal_duration[ordinal & TPM_PROTECTED_ORDINAL_MASK]; - if (duration_idx != 
TPM_UNDEFINED) { + if (duration_idx != TPM_UNDEFINED) duration = chip->vendor.duration[duration_idx]; - /* if duration is 0, it's because chip->vendor.duration wasn't */ - /* filled yet, so we set the lowest timeout just to give enough */ - /* time for tpm_get_timeouts() to succeed */ - return (duration <= 0 ? HZ : duration); - } else + if (duration <= 0) return 2 * 60 * HZ; + else + return duration; } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); diff --git a/trunk/drivers/char/tpm/tpm_tis.c b/trunk/drivers/char/tpm/tpm_tis.c index dd21df55689d..c17a305ecb28 100644 --- a/trunk/drivers/char/tpm/tpm_tis.c +++ b/trunk/drivers/char/tpm/tpm_tis.c @@ -493,6 +493,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, "1.2 TPM (device-id 0x%X, rev-id %d)\n", vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); + if (is_itpm(to_pnp_dev(dev))) + itpm = 1; + if (itpm) dev_info(dev, "Intel iTPM workaround enabled\n"); @@ -634,9 +637,6 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, else interrupts = 0; - if (is_itpm(pnp_dev)) - itpm = 1; - return tpm_tis_init(&pnp_dev->dev, start, len, irq); } diff --git a/trunk/drivers/clocksource/acpi_pm.c b/trunk/drivers/clocksource/acpi_pm.c index effe7974aa9a..cfb0f5278415 100644 --- a/trunk/drivers/clocksource/acpi_pm.c +++ b/trunk/drivers/clocksource/acpi_pm.c @@ -202,21 +202,17 @@ static int __init init_acpi_pm_clocksource(void) printk(KERN_INFO "PM-Timer had inconsistent results:" " 0x%#llx, 0x%#llx - aborting.\n", value1, value2); - pmtmr_ioport = 0; return -EINVAL; } if (i == ACPI_PM_READ_CHECKS) { printk(KERN_INFO "PM-Timer failed consistency check " " (0x%#llx) - aborting.\n", value1); - pmtmr_ioport = 0; return -ENODEV; } } - if (verify_pmtmr_rate() != 0){ - pmtmr_ioport = 0; + if (verify_pmtmr_rate() != 0) return -ENODEV; - } return clocksource_register_hz(&clocksource_acpi_pm, PMTMR_TICKS_PER_SEC); diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c index 1fa091e05690..7acb32e7f817 100644 --- a/trunk/drivers/idle/intel_idle.c +++ b/trunk/drivers/idle/intel_idle.c @@ -263,7 +263,7 @@ static void __setup_broadcast_timer(void *arg) clockevents_notify(reason, &cpu); } -static int setup_broadcast_cpuhp_notify(struct notifier_block *n, +static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; @@ -273,11 +273,15 @@ static int setup_broadcast_cpuhp_notify(struct notifier_block *n, smp_call_function_single(hotcpu, __setup_broadcast_timer, (void *)true, 1); break; + case CPU_DOWN_PREPARE: + smp_call_function_single(hotcpu, __setup_broadcast_timer, + (void *)false, 1); + break; } return NOTIFY_OK; } -static struct notifier_block setup_broadcast_notifier = { +static struct notifier_block __cpuinitdata setup_broadcast_notifier = { .notifier_call = setup_broadcast_cpuhp_notify, }; diff --git a/trunk/drivers/net/arm/ks8695net.c b/trunk/drivers/net/arm/ks8695net.c index aa07657744c3..62d6f88cbab5 100644 --- a/trunk/drivers/net/arm/ks8695net.c +++ b/trunk/drivers/net/arm/ks8695net.c @@ -1644,7 +1644,7 @@ ks8695_cleanup(void) module_init(ks8695_init); module_exit(ks8695_cleanup); -MODULE_AUTHOR("Simtec Electronics"); +MODULE_AUTHOR("Simtec Electronics") MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" MODULENAME); diff --git a/trunk/drivers/net/atl1c/atl1c_hw.c b/trunk/drivers/net/atl1c/atl1c_hw.c index 23f2ab0f2fa8..1bf672009948 100644 --- 
a/trunk/drivers/net/atl1c/atl1c_hw.c +++ b/trunk/drivers/net/atl1c/atl1c_hw.c @@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) */ static int atl1c_phy_setup_adv(struct atl1c_hw *hw) { - u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL; + u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK; u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & ~GIGA_CR_1000T_SPEED_MASK; @@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw) } if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || - atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0) + atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0) return -1; return 0; } @@ -517,18 +517,19 @@ int atl1c_phy_init(struct atl1c_hw *hw) "Error Setting up Auto-Negotiation\n"); return ret_val; } - mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; break; case MEDIA_TYPE_100M_FULL: - mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX; + mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX; break; case MEDIA_TYPE_100M_HALF: - mii_bmcr_data |= BMCR_SPEED100; + mii_bmcr_data |= BMCR_SPEED_100; break; case MEDIA_TYPE_10M_FULL: - mii_bmcr_data |= BMCR_FULLDPLX; + mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX; break; case MEDIA_TYPE_10M_HALF: + mii_bmcr_data |= BMCR_SPEED_10; break; default: if (netif_msg_link(adapter)) @@ -656,7 +657,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw) err = atl1c_phy_setup_adv(hw); if (err) return err; - mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); } diff --git a/trunk/drivers/net/atl1c/atl1c_hw.h b/trunk/drivers/net/atl1c/atl1c_hw.h index 655fc6c4a8a4..3dd675979aa1 100644 --- a/trunk/drivers/net/atl1c/atl1c_hw.h +++ b/trunk/drivers/net/atl1c/atl1c_hw.h @@ -736,16 +736,55 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define REG_DEBUG_DATA0 0x1900 #define REG_DEBUG_DATA1 0x1904 +/* PHY Control Register */ +#define MII_BMCR 0x00 +#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define BMCR_POWER_DOWN 0x0800 /* Power down */ +#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define BMCR_SPEED_MASK 0x2040 +#define BMCR_SPEED_1000 0x0040 +#define BMCR_SPEED_100 0x2000 +#define BMCR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_BMSR 0x01 +#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +#define MII_PHYSID1 0x02 +#define MII_PHYSID2 0x03 #define L1D_MPW_PHYID1 0xD01C /* V7 */ #define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ #define L1D_MPW_PHYID3 0xD01E /* V8 */ /* Autoneg Advertisement Register */ -#define ADVERTISE_DEFAULT_CAP \ - (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM) +#define MII_ADVERTISE 0x04 +#define ADVERTISE_SPEED_MASK 0x01E0 +#define ADVERTISE_DEFAULT_CAP 0x0DE0 /* 1000BASE-T Control Register */ +#define MII_GIGA_CR 0x09 #define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */ #define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ diff --git a/trunk/drivers/net/atl1e/atl1e_ethtool.c b/trunk/drivers/net/atl1e/atl1e_ethtool.c index 1209297433b8..6943a6c3b948 100644 --- a/trunk/drivers/net/atl1e/atl1e_ethtool.c +++ b/trunk/drivers/net/atl1e/atl1e_ethtool.c @@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev, ecmd->advertising = hw->autoneg_advertised | ADVERTISED_TP | ADVERTISED_Autoneg; - adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL; + adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK; adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; if (hw->autoneg_advertised & ADVERTISE_10_HALF) - adv4 |= ADVERTISE_10HALF; + adv4 |= MII_AR_10T_HD_CAPS; if (hw->autoneg_advertised & ADVERTISE_10_FULL) - adv4 |= ADVERTISE_10FULL; + adv4 |= MII_AR_10T_FD_CAPS; if (hw->autoneg_advertised & ADVERTISE_100_HALF) - adv4 |= ADVERTISE_100HALF; + adv4 |= MII_AR_100TX_HD_CAPS; if (hw->autoneg_advertised & ADVERTISE_100_FULL) - adv4 |= ADVERTISE_100FULL; + adv4 |= MII_AR_100TX_FD_CAPS; if (hw->autoneg_advertised & ADVERTISE_1000_FULL) - adv9 |= ADVERTISE_1000FULL; + adv9 |= MII_AT001_CR_1000T_FD_CAPS; if (adv4 != hw->mii_autoneg_adv_reg || adv9 != hw->mii_1000t_ctrl_reg) { diff --git a/trunk/drivers/net/atl1e/atl1e_hw.c b/trunk/drivers/net/atl1e/atl1e_hw.c index 923063d2e5bb..76cc043def8c 100644 --- a/trunk/drivers/net/atl1e/atl1e_hw.c +++ b/trunk/drivers/net/atl1e/atl1e_hw.c @@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T control Register (Address 9). 
*/ - mii_autoneg_adv_reg &= ~ADVERTISE_ALL; + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; /* @@ -327,37 +327,44 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) */ switch (hw->media_type) { case MEDIA_TYPE_AUTO_SENSOR: - mii_autoneg_adv_reg |= ADVERTISE_ALL; - hw->autoneg_advertised = ADVERTISE_ALL; + mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS | + MII_AR_100TX_FD_CAPS); + hw->autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL | + ADVERTISE_100_HALF | + ADVERTISE_100_FULL; if (hw->nic_type == athr_l1e) { - mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; + mii_1000t_ctrl_reg |= + MII_AT001_CR_1000T_FD_CAPS; hw->autoneg_advertised |= ADVERTISE_1000_FULL; } break; case MEDIA_TYPE_100M_FULL: - mii_autoneg_adv_reg |= ADVERTISE_100FULL; + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; hw->autoneg_advertised = ADVERTISE_100_FULL; break; case MEDIA_TYPE_100M_HALF: - mii_autoneg_adv_reg |= ADVERTISE_100_HALF; + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; hw->autoneg_advertised = ADVERTISE_100_HALF; break; case MEDIA_TYPE_10M_FULL: - mii_autoneg_adv_reg |= ADVERTISE_10_FULL; + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; hw->autoneg_advertised = ADVERTISE_10_FULL; break; default: - mii_autoneg_adv_reg |= ADVERTISE_10_HALF; + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; hw->autoneg_advertised = ADVERTISE_10_HALF; break; } /* flow control fixed to enable all */ - mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; @@ -367,7 +374,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) return ret_val; if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { - ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000, + ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg); if (ret_val) return ret_val; @@ -390,7 +397,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw) int ret_val; u16 phy_data; - phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { @@ -638,14 +645,15 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw) return err; if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { - err = atl1e_write_phy_reg(hw, MII_CTRL1000, + err = atl1e_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); if (err) return err; } err = atl1e_write_phy_reg(hw, MII_BMCR, - BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); + MII_CR_RESET | MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); return err; } diff --git a/trunk/drivers/net/atl1e/atl1e_hw.h b/trunk/drivers/net/atl1e/atl1e_hw.h index 74df16aef793..5ea2f4d86cfa 100644 --- a/trunk/drivers/net/atl1e/atl1e_hw.h +++ b/trunk/drivers/net/atl1e/atl1e_hw.h @@ -629,24 +629,127 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw); /***************************** MII definition ***************************************/ /* PHY Common Register */ +#define MII_BMCR 0x00 +#define MII_BMSR 0x01 +#define MII_PHYSID1 0x02 +#define MII_PHYSID2 0x03 +#define MII_ADVERTISE 0x04 +#define MII_LPA 0x05 +#define MII_EXPANSION 0x06 +#define MII_AT001_CR 0x09 +#define MII_AT001_SR 0x0A +#define MII_AT001_ESR 0x0F #define MII_AT001_PSCR 0x10 #define MII_AT001_PSSR 0x11 #define MII_INT_CTRL 0x12 #define MII_INT_STATUS 0x13 #define MII_SMARTSPEED 0x14 
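For orientation only (not part of this patch): the MII register indices being reintroduced in this header are consumed through the driver's own PHY accessors such as atl1e_read_phy_reg(). A minimal, hypothetical sketch of a link check built on these definitions could look like the following; the helper name is invented for illustration, and the double BMSR read is there because the status register latches link-down events.

/* Illustrative sketch only -- not part of this patch. Uses the MII_BMSR
 * index and the MII_SR_LINK_STATUS bit defined in this header.
 */
static int atl1e_link_is_up(struct atl1e_hw *hw)
{
	u16 bmsr = 0;

	/* BMSR latches link-down; read twice to get the current state */
	atl1e_read_phy_reg(hw, MII_BMSR, &bmsr);
	atl1e_read_phy_reg(hw, MII_BMSR, &bmsr);

	return (bmsr & MII_SR_LINK_STATUS) != 0;
}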
+#define MII_RERRCOUNTER 0x15 +#define MII_SREVISION 0x16 +#define MII_RESV1 0x17 #define MII_LBRERROR 0x18 +#define MII_PHYADDR 0x19 #define MII_RESV2 0x1a +#define MII_TPISTATUS 0x1b +#define MII_NCONFIG 0x1c #define MII_DBG_ADDR 0x1D #define MII_DBG_DATA 0x1E + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_MASK 0x2040 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Link partner ability register. 
*/ +#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ +#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ +#define MII_LPA_PAUSE 0x0400 /* PAUSE */ +#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ +#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ +#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ +#define MII_LPA_NPAGE 0x8000 /* Next page bit */ + /* Autoneg Advertisement Register */ -#define MII_AR_DEFAULT_CAP_MASK 0 +#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ +#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ +#define MII_AR_SPEED_MASK 0x01E0 +#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 /* 1000BASE-T Control Register */ -#define MII_AT001_CR_1000T_SPEED_MASK \ - (ADVERTISE_1000FULL | ADVERTISE_1000HALF) -#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK +#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ +/* 0=DTE device */ +#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ +/* 0=Configure PHY as Slave */ +#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ +/* 0=Automatic Master/Slave config */ +#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define MII_AT001_CR_1000T_SPEED_MASK 0x0300 +#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300 + +/* 1000BASE-T Status Register */ +#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 + +/* Extended Status Register */ +#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ /* AT001 PHY Specific 
Control Register */ #define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ diff --git a/trunk/drivers/net/atl1e/atl1e_main.c b/trunk/drivers/net/atl1e/atl1e_main.c index bf7500ccd73f..e28f8baf394e 100644 --- a/trunk/drivers/net/atl1e/atl1e_main.c +++ b/trunk/drivers/net/atl1e/atl1e_main.c @@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); - mii_advertise_data = ADVERTISE_10HALF; + mii_advertise_data = MII_AR_10T_HD_CAPS; - if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) || + if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) || (atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_advertise_data) != 0) || (atl1e_phy_commit(hw)) != 0) { diff --git a/trunk/drivers/net/bnx2.c b/trunk/drivers/net/bnx2.c index 2a961b7f7e17..df99edf3464a 100644 --- a/trunk/drivers/net/bnx2.c +++ b/trunk/drivers/net/bnx2.c @@ -435,8 +435,7 @@ bnx2_cnic_stop(struct bnx2 *bp) struct cnic_ctl_info info; mutex_lock(&bp->cnic_lock); - c_ops = rcu_dereference_protected(bp->cnic_ops, - lockdep_is_held(&bp->cnic_lock)); + c_ops = bp->cnic_ops; if (c_ops) { info.cmd = CNIC_CTL_STOP_CMD; c_ops->cnic_ctl(bp->cnic_data, &info); @@ -451,8 +450,7 @@ bnx2_cnic_start(struct bnx2 *bp) struct cnic_ctl_info info; mutex_lock(&bp->cnic_lock); - c_ops = rcu_dereference_protected(bp->cnic_ops, - lockdep_is_held(&bp->cnic_lock)); + c_ops = bp->cnic_ops; if (c_ops) { if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; @@ -7555,10 +7553,6 @@ bnx2_set_flags(struct net_device *dev, u32 data) !(data & ETH_FLAG_RXVLAN)) return -EINVAL; - /* TSO with VLAN tag won't work with current firmware */ - if (!(data & ETH_FLAG_TXVLAN)) - return -EINVAL; - rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN); if (rc) @@ -7968,8 +7962,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) /* AER (Advanced Error Reporting) hooks */ err = pci_enable_pcie_error_reporting(pdev); - if (!err) - bp->flags |= BNX2_FLAG_AER_ENABLED; + if (err) { + dev_err(&pdev->dev, "pci_enable_pcie_error_reporting " + "failed 0x%x\n", err); + /* non-fatal, continue */ + } } else { bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); @@ -8232,10 +8229,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) return 0; err_out_unmap: - if (bp->flags & BNX2_FLAG_AER_ENABLED) { + if (bp->flags & BNX2_FLAG_PCIE) pci_disable_pcie_error_reporting(pdev); - bp->flags &= ~BNX2_FLAG_AER_ENABLED; - } if (bp->regview) { iounmap(bp->regview); @@ -8317,7 +8312,7 @@ static const struct net_device_ops bnx2_netdev_ops = { #endif }; -static void inline vlan_features_add(struct net_device *dev, u32 flags) +static void inline vlan_features_add(struct net_device *dev, unsigned long flags) { dev->vlan_features |= flags; } @@ -8423,10 +8418,8 @@ bnx2_remove_one(struct pci_dev *pdev) kfree(bp->temp_stats_blk); - if (bp->flags & BNX2_FLAG_AER_ENABLED) { + if (bp->flags & BNX2_FLAG_PCIE) pci_disable_pcie_error_reporting(pdev); - bp->flags &= ~BNX2_FLAG_AER_ENABLED; - } free_netdev(dev); @@ -8542,7 +8535,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) } rtnl_unlock(); - if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) + if (!(bp->flags & BNX2_FLAG_PCIE)) return result; err = pci_cleanup_aer_uncorrect_error_status(pdev); diff --git a/trunk/drivers/net/bnx2.h b/trunk/drivers/net/bnx2.h index 0132ea959995..5488a2e82fe9 100644 
--- a/trunk/drivers/net/bnx2.h +++ b/trunk/drivers/net/bnx2.h @@ -6741,7 +6741,6 @@ struct bnx2 { #define BNX2_FLAG_JUMBO_BROKEN 0x00000800 #define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 #define BNX2_FLAG_BROKEN_STATS 0x00002000 -#define BNX2_FLAG_AER_ENABLED 0x00004000 struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; @@ -6759,7 +6758,7 @@ struct bnx2 { u32 tx_wake_thresh; #ifdef BCM_CNIC - struct cnic_ops __rcu *cnic_ops; + struct cnic_ops *cnic_ops; void *cnic_data; #endif diff --git a/trunk/drivers/net/bnx2x/bnx2x.h b/trunk/drivers/net/bnx2x/bnx2x.h index 04fb72b923b2..8e4183717d91 100644 --- a/trunk/drivers/net/bnx2x/bnx2x.h +++ b/trunk/drivers/net/bnx2x/bnx2x.h @@ -22,8 +22,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.62.11-0" -#define DRV_MODULE_RELDATE "2011/01/31" +#define DRV_MODULE_VERSION "1.62.00-4" +#define DRV_MODULE_RELDATE "2011/01/18" #define BNX2X_BC_VER 0x040200 #define BNX2X_MULTI_QUEUE @@ -1110,7 +1110,7 @@ struct bnx2x { #define BNX2X_CNIC_FLAG_MAC_SET 1 void *t2; dma_addr_t t2_mapping; - struct cnic_ops __rcu *cnic_ops; + struct cnic_ops *cnic_ops; void *cnic_data; u32 cnic_tag; struct cnic_eth_dev cnic_eth_dev; diff --git a/trunk/drivers/net/bnx2x/bnx2x_hsi.h b/trunk/drivers/net/bnx2x/bnx2x_hsi.h index 51d69db23a71..548f5631c0dc 100644 --- a/trunk/drivers/net/bnx2x/bnx2x_hsi.h +++ b/trunk/drivers/net/bnx2x/bnx2x_hsi.h @@ -237,26 +237,8 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 - u32 Reserved0[3]; /* 0x158 */ - /* Controls the TX laser of the SFP+ module */ - u32 sfp_ctrl; /* 0x164 */ -#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF -#define PORT_HW_CFG_TX_LASER_SHIFT 0 -#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 -#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 -#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 -#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 -#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 - - /* Controls the fault module LED of the SFP+ */ -#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 -#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 -#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 - u32 Reserved01[12]; /* 0x158 */ + u32 Reserved0[16]; /* 0x158 */ + /* for external PHY, or forced mode or during AN */ u16 xgxs_config_rx[4]; /* 0x198 */ @@ -264,78 +246,12 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ u32 Reserved1[56]; /* 0x1A8 */ u32 default_cfg; /* 0x288 */ -#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 -#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 -#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 -#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 -#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 - -#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C -#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 -#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 -#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 -#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c - -#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 -#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 -#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 -#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 -#define 
PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 - -#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 -#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 -#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 -#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 -#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 - - /* - * When KR link is required to be set to force which is not - * KR-compliant, this parameter determine what is the trigger for it. - * When GPIO is selected, low input will force the speed. Currently - * default speed is 1G. In the future, it may be widen to select the - * forced speed in with another parameter. Note when force-1G is - * enabled, it override option 56: Link Speed option. - */ -#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 -#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 -#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 -#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 - /* Enable to determine with which GPIO to reset the external phy */ -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 /* Enable BAM on KR */ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 - /* Enable Common Mode Sense */ -#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 -#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 -#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 -#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 - u32 speed_capability_mask2; /* 0x28C */ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 @@ -465,7 +381,6 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 diff --git a/trunk/drivers/net/bnx2x/bnx2x_link.c b/trunk/drivers/net/bnx2x/bnx2x_link.c index f2f367d4e74d..7160ec51093e 100644 --- a/trunk/drivers/net/bnx2x/bnx2x_link.c +++ b/trunk/drivers/net/bnx2x/bnx2x_link.c @@ -1,4 +1,4 @@ -/* Copyright 2008-2011 Broadcom Corporation +/* Copyright 2008-2009 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement 
governing use of this software, this software is licensed to you @@ -28,13 +28,12 @@ /********************************************************/ #define ETH_HLEN 14 -/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ -#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) +#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */ #define ETH_MIN_PACKET_SIZE 60 #define ETH_MAX_PACKET_SIZE 1500 #define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define MDIO_ACCESS_TIMEOUT 1000 -#define BMAC_CONTROL_RX_ENABLE 2 +#define BMAC_CONTROL_RX_ENABLE 2 /***********************************************************/ /* Shortcut definitions */ @@ -80,7 +79,7 @@ #define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 #define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 -#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM +#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM #define AUTONEG_PARALLEL \ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION #define AUTONEG_SGMII_FIBER_AUTODET \ @@ -113,10 +112,10 @@ #define GP_STATUS_10G_KX4 \ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 -#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD -#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD +#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD +#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD #define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD -#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 +#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 #define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD #define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD #define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD @@ -124,18 +123,18 @@ #define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD #define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD #define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD -#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD -#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD -#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD -#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD +#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD +#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD +#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD +#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD #define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD #define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD -#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD -#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD -#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD -#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD -#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD -#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD +#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD +#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD +#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD +#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD +#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD +#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD #define PHY_XGXS_FLAG 0x1 #define PHY_SGMII_FLAG 0x2 @@ -143,7 +142,7 @@ /* */ #define SFP_EEPROM_CON_TYPE_ADDR 0x2 - #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 + #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 @@ -154,15 +153,15 @@ #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 - #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 + #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 
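As context for the SFP+ EEPROM constants in this block (illustration only, not taken from the driver): the connector-type byte at offset 0x2 and the FC TX technology bitmask at offset 0x8 are what the link code later uses to distinguish passive copper DAC cables from optical or active modules when picking an EDC mode (the EDC_MODE_* values appear just below). A simplified, hypothetical sketch of that selection, using only the constants defined here:

/* Simplified sketch with an invented function name; the driver's real
 * selection also considers module options and linear vs. limiting optics.
 */
static u16 sfp_pick_edc_mode(u8 con_type, u8 fc_tx_tech)
{
	if (con_type == SFP_EEPROM_CON_TYPE_VAL_COPPER &&
	    (fc_tx_tech & SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE))
		return EDC_MODE_PASSIVE_DAC;	/* passive copper cable */

	return EDC_MODE_LIMITING;		/* optical / active copper */
}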
-#define SFP_EEPROM_OPTIONS_ADDR 0x40 +#define SFP_EEPROM_OPTIONS_ADDR 0x40 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 -#define SFP_EEPROM_OPTIONS_SIZE 2 +#define SFP_EEPROM_OPTIONS_SIZE 2 -#define EDC_MODE_LINEAR 0x0022 -#define EDC_MODE_LIMITING 0x0044 -#define EDC_MODE_PASSIVE_DAC 0x0055 +#define EDC_MODE_LINEAR 0x0022 +#define EDC_MODE_LIMITING 0x0044 +#define EDC_MODE_PASSIVE_DAC 0x0055 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) @@ -171,18 +170,24 @@ /* INTERFACE */ /**********************************************************/ -#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ +#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ bnx2x_cl45_write(_bp, _phy, \ (_phy)->def_md_devad, \ (_bank + (_addr & 0xf)), \ _val) -#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ +#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ bnx2x_cl45_read(_bp, _phy, \ (_phy)->def_md_devad, \ (_bank + (_addr & 0xf)), \ _val) +static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 *ret_val); + +static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 val); + static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) { u32 val = REG_RD(bp, reg); @@ -211,7 +216,7 @@ void bnx2x_ets_disabled(struct link_params *params) DP(NETIF_MSG_LINK, "ETS disabled configuration\n"); - /* + /** * mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) * 3bits client num. @@ -220,7 +225,7 @@ void bnx2x_ets_disabled(struct link_params *params) */ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); - /* + /** * Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - * COS0 entry, 4 - COS1 entry. @@ -232,12 +237,12 @@ void bnx2x_ets_disabled(struct link_params *params) REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); /* defines which entries (clients) are subjected to WFQ arbitration */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); - /* - * For strict priority entries defines the number of consecutive - * slots for the highest priority. - */ + /** + * For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); - /* + /** * mapping between the CREDIT_WEIGHT registers and actual client * numbers */ @@ -250,7 +255,7 @@ void bnx2x_ets_disabled(struct link_params *params) REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); /* ETS mode disable */ REG_WR(bp, PBF_REG_ETS_ENABLED, 0); - /* + /** * If ETS mode is enabled (there is no strict priority) defines a WFQ * weight for COS0/COS1. 
*/ @@ -263,24 +268,24 @@ void bnx2x_ets_disabled(struct link_params *params) REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); } -static void bnx2x_ets_bw_limit_common(const struct link_params *params) +void bnx2x_ets_bw_limit_common(const struct link_params *params) { /* ETS disabled configuration */ struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); - /* - * defines which entries (clients) are subjected to WFQ arbitration - * COS0 0x8 - * COS1 0x10 - */ + /** + * defines which entries (clients) are subjected to WFQ arbitration + * COS0 0x8 + * COS1 0x10 + */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); - /* - * mapping between the ARB_CREDIT_WEIGHT registers and actual - * client numbers (WEIGHT_0 does not actually have to represent - * client 0) - * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 - * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 - */ + /** + * mapping between the ARB_CREDIT_WEIGHT registers and actual + * client numbers (WEIGHT_0 does not actually have to represent + * client 0) + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 + */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, @@ -293,14 +298,14 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves - * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 - * entry, 4 - COS1 entry. - * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT - * bit4 bit3 bit2 bit1 bit0 - * MCP and debug are strict - */ + /** + * Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 + * entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ @@ -324,7 +329,8 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, if ((0 == total_bw) || (0 == cos0_bw) || (0 == cos1_bw)) { - DP(NETIF_MSG_LINK, "Total BW can't be zero\n"); + DP(NETIF_MSG_LINK, + "bnx2x_ets_bw_limit: Total BW can't be zero\n"); return; } @@ -349,7 +355,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) u32 val = 0; DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); - /* + /** * Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, * 3 - COS0 entry, 4 - COS1 entry. @@ -358,7 +364,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) * MCP and debug are strict */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); - /* + /** * For strict priority entries defines the number of consecutive slots * for the highest priority. */ @@ -371,14 +377,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); - /* - * mapping between entry priority to client number (0,1,2 -debug and - * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) - * 3bits client num. 
- * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 - * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 - * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 - */ + /** + * mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 + * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 + */ val = (0 == strict_cos) ? 0x2318 : 0x22E0; REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); @@ -465,7 +471,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, /* MAC/PBF section */ /******************************************************************/ static void bnx2x_emac_init(struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { /* reset and unreset the emac core */ struct bnx2x *bp = params->bp; @@ -475,10 +481,10 @@ static void bnx2x_emac_init(struct link_params *params, u16 timeout; REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); udelay(5); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); /* init emac - use read-modify-write */ /* self clear reset */ @@ -509,7 +515,7 @@ static void bnx2x_emac_init(struct link_params *params, } static u8 bnx2x_emac_enable(struct link_params *params, - struct link_vars *vars, u8 lb) + struct link_vars *vars, u8 lb) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -521,33 +527,55 @@ static u8 bnx2x_emac_enable(struct link_params *params, /* enable emac and not bmac */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); + /* for paladium */ + if (CHIP_REV_IS_EMUL(bp)) { + /* Use lane 1 (of lanes 0-3) */ + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + + port*4, 1); + } + /* for fpga */ + else + + if (CHIP_REV_IS_FPGA(bp)) { + /* Use lane 1 (of lanes 0-3) */ + DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n"); + + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, + 0); + } else /* ASIC */ if (vars->phy_flags & PHY_XGXS_FLAG) { u32 ser_lane = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); DP(NETIF_MSG_LINK, "XGXS\n"); /* select the master lanes (out of 0-3) */ - REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane); + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + + port*4, ser_lane); /* select XGXS */ - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + + port*4, 1); } else { /* SerDes */ DP(NETIF_MSG_LINK, "SerDes\n"); /* select SerDes */ - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + + port*4, 0); } bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, - EMAC_RX_MODE_RESET); + EMAC_RX_MODE_RESET); bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, - EMAC_TX_MODE_RESET); + EMAC_TX_MODE_RESET); if (CHIP_REV_IS_SLOW(bp)) { /* config GMII mode */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII)); + EMAC_WR(bp, EMAC_REG_EMAC_MODE, + (val | EMAC_MODE_PORT_GMII)); } else { /* ASIC */ /* 
pause enable/disable */ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, @@ -577,14 +605,14 @@ static u8 bnx2x_emac_enable(struct link_params *params, val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; - /* - * Setting this bit causes MAC control frames (except for pause - * frames) to be passed on for processing. This setting has no - * affect on the operation of the pause frames. This bit effects - * all packets regardless of RX Parser packet sorting logic. - * Turn the PFC off to make sure we are in Xon state before - * enabling it. - */ + /** + * Setting this bit causes MAC control frames (except for pause + * frames) to be passed on for processing. This setting has no + * affect on the operation of the pause frames. This bit effects + * all packets regardless of RX Parser packet sorting logic. + * Turn the PFC off to make sure we are in Xon state before + * enabling it. + */ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { DP(NETIF_MSG_LINK, "PFC is enabled\n"); @@ -638,7 +666,16 @@ static u8 bnx2x_emac_enable(struct link_params *params, REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); - REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); + if (CHIP_REV_IS_EMUL(bp)) { + /* take the BigMac out of reset */ + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* enable access for bmac registers */ + REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); + } else + REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); vars->mac_type = MAC_TYPE_EMAC; return 0; @@ -694,7 +731,8 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, val |= (1<<5); wb_data[0] = val; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, + wb_data, 2); udelay(30); /* Tx control */ @@ -730,12 +768,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); - /* - * Set Time (based unit is 512 bit time) between automatic - * re-sending of PP packets amd enable automatic re-send of - * Per-Priroity Packet as long as pp_gen is asserted and - * pp_disable is low. - */ + /** + * Set Time (based unit is 512 bit time) between automatic + * re-sending of PP packets amd enable automatic re-send of + * Per-Priroity Packet as long as pp_gen is asserted and + * pp_disable is low. 
+ */ val = 0x8000; if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) val |= (1<<16); /* enable automatic re-send */ @@ -743,7 +781,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, wb_data[0] = val; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, - wb_data, 2); + wb_data, 2); /* mac control */ val = 0x3; /* Enable RX and TX */ @@ -757,7 +795,8 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, wb_data[0] = val; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, + wb_data, 2); } static void bnx2x_update_pfc_brb(struct link_params *params, @@ -786,25 +825,17 @@ static void bnx2x_update_pfc_brb(struct link_params *params, full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; } - /* - * The number of free blocks below which the pause signal to class 0 - * of MAC #n is asserted. n=0,1 - */ + /* The number of free blocks below which the pause signal to class 0 + of MAC #n is asserted. n=0,1 */ REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th); - /* - * The number of free blocks above which the pause signal to class 0 - * of MAC #n is de-asserted. n=0,1 - */ + /* The number of free blocks above which the pause signal to class 0 + of MAC #n is de-asserted. n=0,1 */ REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th); - /* - * The number of free blocks below which the full signal to class 0 - * of MAC #n is asserted. n=0,1 - */ + /* The number of free blocks below which the full signal to class 0 + of MAC #n is asserted. n=0,1 */ REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th); - /* - * The number of free blocks above which the full signal to class 0 - * of MAC #n is de-asserted. n=0,1 - */ + /* The number of free blocks above which the full signal to class 0 + of MAC #n is de-asserted. n=0,1 */ REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th); if (set_pfc && pfc_params) { @@ -828,25 +859,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params, full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; } - /* + /** * The number of free blocks below which the pause signal to * class 1 of MAC #n is asserted. n=0,1 - */ + **/ REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th); - /* + /** * The number of free blocks above which the pause signal to * class 1 of MAC #n is de-asserted. n=0,1 - */ + **/ REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th); - /* + /** * The number of free blocks below which the full signal to * class 1 of MAC #n is asserted. n=0,1 - */ + **/ REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th); - /* + /** * The number of free blocks above which the full signal to * class 1 of MAC #n is de-asserted. n=0,1 - */ + **/ REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th); } } @@ -865,7 +896,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, FEATURE_CONFIG_PFC_ENABLED; DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); - /* + /** * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set * MAC control frames (that are not pause packets) * will be forwarded to the XCM. @@ -873,7 +904,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, xcm_mask = REG_RD(bp, port ? 
NIG_REG_LLH1_XCM_MASK : NIG_REG_LLH0_XCM_MASK); - /* + /** * nig params will override non PFC params, since it's possible to * do transition from PFC to SAFC */ @@ -963,7 +994,7 @@ void bnx2x_update_pfc(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *pfc_params) { - /* + /** * The PFC and pause are orthogonal to one another, meaning when * PFC is enabled, the pause are disabled, and when PFC is * disabled, pause are set according to the pause result. @@ -1004,7 +1035,7 @@ void bnx2x_update_pfc(struct link_params *params, static u8 bnx2x_bmac1_enable(struct link_params *params, struct link_vars *vars, - u8 is_lb) + u8 is_lb) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -1018,8 +1049,9 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, /* XGXS control */ wb_data[0] = 0x3c; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); /* tx MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | @@ -1028,7 +1060,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, params->mac_addr[5]); wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]); - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, + wb_data, 2); /* mac control */ val = 0x3; @@ -1038,30 +1071,43 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, } wb_data[0] = val; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, + wb_data, 2); /* set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, + wb_data, 2); bnx2x_update_pfc_bmac1(params, vars); /* set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, + wb_data, 2); /* set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, + wb_data, 2); /* configure safc */ wb_data[0] = 0x1000200; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, wb_data, 2); + /* fix for emulation */ + if (CHIP_REV_IS_EMUL(bp)) { + wb_data[0] = 0xf000; + wb_data[1] = 0; + REG_WR_DMAE(bp, + bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD, + wb_data, 2); + } + return 0; } @@ -1080,14 +1126,16 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, wb_data[0] = 0; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, + wb_data, 2); udelay(30); /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ wb_data[0] = 0x3c; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); udelay(30); @@ -1099,7 +1147,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]); REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, - wb_data, 2); + 
wb_data, 2); udelay(30); @@ -1107,24 +1155,27 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, wb_data[0] = 0x1000200; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, - wb_data, 2); + wb_data, 2); udelay(30); /* set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, + wb_data, 2); udelay(30); /* set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, + wb_data, 2); udelay(30); /* set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, + wb_data, 2); udelay(30); bnx2x_update_pfc_bmac2(params, vars, is_lb); @@ -1140,11 +1191,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params, u32 val; /* reset and unreset the BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); msleep(1); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); /* enable access for bmac registers */ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); @@ -1179,14 +1230,15 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status) struct bnx2x *bp = params->bp; REG_WR(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[params->port].link_status), link_status); + offsetof(struct shmem_region, + port_mb[params->port].link_status), + link_status); } static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) { u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM; + NIG_REG_INGRESS_BMAC0_MEM; u32 wb_data[2]; u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); @@ -1198,12 +1250,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) if (CHIP_IS_E2(bp)) { /* Clear Rx Enable bit in BMAC_CONTROL register */ REG_RD_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); + BIGMAC2_REGISTER_BMAC_CONTROL, + wb_data, 2); wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; REG_WR_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); + BIGMAC2_REGISTER_BMAC_CONTROL, + wb_data, 2); } else { /* Clear Rx Enable bit in BMAC_CONTROL register */ REG_RD_DMAE(bp, bmac_addr + @@ -1219,7 +1271,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) } static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, - u32 line_speed) + u32 line_speed) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -1256,7 +1308,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, /* update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); /* update init credit */ - init_crd = 778; /* (800-18-4) */ + init_crd = 778; /* (800-18-4) */ } else { u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + @@ -1301,23 +1353,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, return 0; } -/* - * get_emac_base - * - * @param cb - * @param mdc_mdio_access - * @param port - * - * @return u32 - * - * This function selects the MDC/MDIO access (through emac0 or - * emac1) depend on the mdc_mdio_access, port, port swapped. Each - * phy has a default access mode, which could also be overridden - * by nvram configuration. This parameter, whether this is the - * default phy configuration, or the nvram overrun - * configuration, is passed here as mdc_mdio_access and selects - * the emac_base for the CL45 read/writes operations - */ static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 mdc_mdio_access, u8 port) { @@ -1350,16 +1385,13 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp, } -/******************************************************************/ -/* CL45 access functions */ -/******************************************************************/ -static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 val) +u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 val) { u32 tmp, saved_mode; u8 i, rc = 0; - /* - * Set clause 45 mode, slow down the MDIO clock to 2.5MHz + + /* set clause 45 mode, slow down the MDIO clock to 2.5MHz * (a value of 49==0x31) and make sure that the AUTO poll is off */ @@ -1382,7 +1414,8 @@ static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, for (i = 0; i < 50; i++) { udelay(10); - tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + tmp = REG_RD(bp, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { udelay(5); break; @@ -1390,7 +1423,6 @@ static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, } if (tmp & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "write phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; } else { /* data */ @@ -1403,7 +1435,7 @@ static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, udelay(10); tmp = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); + EMAC_REG_EMAC_MDIO_COMM); if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { udelay(5); break; @@ -1411,7 +1443,6 @@ static u8 bnx2x_cl45_write(struct bnx2x 
*bp, struct bnx2x_phy *phy, } if (tmp & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "write phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; } } @@ -1422,20 +1453,20 @@ static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, return rc; } -static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 *ret_val) +u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 *ret_val) { u32 val, saved_mode; u16 i; u8 rc = 0; - /* - * Set clause 45 mode, slow down the MDIO clock to 2.5MHz + + /* set clause 45 mode, slow down the MDIO clock to 2.5MHz * (a value of 49==0x31) and make sure that the AUTO poll is off */ saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | - EMAC_MDIO_MODE_CLOCK_CNT)); + EMAC_MDIO_MODE_CLOCK_CNT)); val |= (EMAC_MDIO_MODE_CLAUSE_45 | (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); @@ -1459,7 +1490,7 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, } if (val & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "read phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + *ret_val = 0; rc = -EFAULT; @@ -1474,7 +1505,7 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, udelay(10); val = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); + EMAC_REG_EMAC_MDIO_COMM); if (!(val & EMAC_MDIO_COMM_START_BUSY)) { *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); break; @@ -1482,7 +1513,7 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, } if (val & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "read phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + *ret_val = 0; rc = -EFAULT; } @@ -1498,7 +1529,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 *ret_val) { u8 phy_index; - /* + /** * Probe for the phy according to the given phy_addr, and execute * the read request on it */ @@ -1516,7 +1547,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 val) { u8 phy_index; - /* + /** * Probe for the phy according to the given phy_addr, and execute * the write request on it */ @@ -1545,15 +1576,16 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params, aer_val = 0x3800 + offset - 1; else aer_val = 0x3800 + offset; - CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, aer_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, aer_val); } static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, struct bnx2x_phy *phy) { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, 0x3800); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0x3800); } /******************************************************************/ @@ -1589,8 +1621,9 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) bnx2x_set_serdes_access(bp, port); - REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, - DEFAULT_PHY_DEV_ADDR); + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + + port*0x10, + DEFAULT_PHY_DEV_ADDR); } static void bnx2x_xgxs_deassert(struct link_params *params) @@ -1608,22 +1641,23 @@ static void bnx2x_xgxs_deassert(struct link_params *params) udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); + 
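Aside on bnx2x_cl45_write()/bnx2x_cl45_read() above: a clause 45 access is two MDIO transactions on the EMAC COMM register, an address cycle followed by a data cycle, each polled until the controller drops START_BUSY; on read, the result is the low 16 data bits of COMM. The sketch below mirrors the write path against a simulated COMM register; the COMM_* bit positions and helper names are invented for illustration and are not the driver's EMAC_MDIO_COMM_* definitions:

#include <stdint.h>

/* Illustrative COMM register layout; real values live in the bnx2x headers. */
#define COMM_START_BUSY   (1u << 29)
#define COMM_CMD_ADDRESS  (0u << 26)
#define COMM_CMD_WRITE    (1u << 26)

static uint32_t fake_comm;                      /* simulated COMM register */
static void sleep_us(unsigned us) { (void)us; } /* stands in for udelay()  */
static uint32_t comm_read(void) { return fake_comm; }
static void comm_write(uint32_t val) { fake_comm = val & ~COMM_START_BUSY; }

static int comm_wait_idle(void)
{
	int i;
	for (i = 0; i < 50; i++) {
		sleep_us(10);
		if (!(comm_read() & COMM_START_BUSY))
			return 0;
	}
	return -1;   /* the driver reports -EFAULT on this timeout */
}

/* Clause 45 write: address cycle, then data cycle, each polled until
 * START_BUSY self-clears, as in bnx2x_cl45_write() above. */
static int cl45_write_sketch(uint16_t devad, uint16_t reg, uint16_t val)
{
	comm_write(COMM_START_BUSY | COMM_CMD_ADDRESS |
		   ((uint32_t)devad << 16) | reg);
	if (comm_wait_idle())
		return -1;

	comm_write(COMM_START_BUSY | COMM_CMD_WRITE |
		   ((uint32_t)devad << 16) | val);
	return comm_wait_idle();
}

int main(void)
{
	return cl45_write_sketch(1, 0xc820, 0x1234) ? 1 : 0;
}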
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + + port*0x18, 0); REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - params->phy[INT_PHY].def_md_devad); + params->phy[INT_PHY].def_md_devad); } void bnx2x_link_status_update(struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 link_10g; u8 port = params->port; vars->link_status = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[port].link_status)); + offsetof(struct shmem_region, + port_mb[port].link_status)); vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); @@ -1633,7 +1667,7 @@ void bnx2x_link_status_update(struct link_params *params, vars->phy_link_up = 1; vars->duplex = DUPLEX_FULL; switch (vars->link_status & - LINK_STATUS_SPEED_AND_DUPLEX_MASK) { + LINK_STATUS_SPEED_AND_DUPLEX_MASK) { case LINK_10THD: vars->duplex = DUPLEX_HALF; /* fall thru */ @@ -1745,20 +1779,20 @@ static void bnx2x_set_master_ln(struct link_params *params, { struct bnx2x *bp = params->bp; u16 new_master_ln, ser_lane; - ser_lane = ((params->lane_config & + ser_lane = ((params->lane_config & PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); /* set the master_ln for AN */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TEST_MODE_LANE, - &new_master_ln); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + &new_master_ln); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2 , - MDIO_XGXS_BLOCK2_TEST_MODE_LANE, - (new_master_ln | ser_lane)); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2 , + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + (new_master_ln | ser_lane)); } static u8 bnx2x_reset_unicore(struct link_params *params, @@ -1768,16 +1802,17 @@ static u8 bnx2x_reset_unicore(struct link_params *params, struct bnx2x *bp = params->bp; u16 mii_control; u16 i; - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); /* reset the unicore */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - (mii_control | - MDIO_COMBO_IEEO_MII_CONTROL_RESET)); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_RESET)); if (set_serdes) bnx2x_set_serdes_access(bp, params->port); @@ -1786,10 +1821,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params, udelay(5); /* the reset erased the previous bank value */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { udelay(5); @@ -1797,9 +1832,6 @@ static u8 bnx2x_reset_unicore(struct link_params *params, } } - netdev_err(bp->dev, "Warning: PHY was not initialized," - " Port %d\n", - params->port); DP(NETIF_MSG_LINK, "BUG! 
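On the switch in bnx2x_link_status_update() above: the shmem link_status word packs speed and duplex together, and each half-duplex case sets DUPLEX_HALF and then falls through to the matching full-duplex case that sets the speed. A compile-and-run sketch of that decode shape, with made-up SKETCH_LINK_* encodings standing in for the real values from bnx2x_hsi.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative encodings only; the driver's LINK_* constants differ. */
#define SKETCH_SPEED_DUPLEX_MASK 0xffu
#define SKETCH_LINK_10THD        0x01u
#define SKETCH_LINK_10TFD        0x02u
#define SKETCH_LINK_100TXHD      0x03u
#define SKETCH_LINK_100TXFD      0x05u

static void decode_speed_duplex(uint32_t link_status,
				unsigned *speed, int *half_duplex)
{
	*half_duplex = 0;
	switch (link_status & SKETCH_SPEED_DUPLEX_MASK) {
	case SKETCH_LINK_10THD:
		*half_duplex = 1;
		/* fall thru */
	case SKETCH_LINK_10TFD:
		*speed = 10;
		break;
	case SKETCH_LINK_100TXHD:
		*half_duplex = 1;
		/* fall thru */
	case SKETCH_LINK_100TXFD:
		*speed = 100;
		break;
	default:
		*speed = 0;
		break;
	}
}

int main(void)
{
	unsigned speed; int half;
	decode_speed_duplex(SKETCH_LINK_100TXHD, &speed, &half);
	printf("%u Mb/s, %s duplex\n", speed, half ? "half" : "full");
	return 0;
}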
XGXS is still in reset!\n"); return -EINVAL; @@ -1809,45 +1841,43 @@ static void bnx2x_set_swap_lanes(struct link_params *params, struct bnx2x_phy *phy) { struct bnx2x *bp = params->bp; - /* - * Each two bits represents a lane number: - * No swap is 0123 => 0x1b no need to enable the swap - */ + /* Each two bits represents a lane number: + No swap is 0123 => 0x1b no need to enable the swap */ u16 ser_lane, rx_lane_swap, tx_lane_swap; ser_lane = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); rx_lane_swap = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); tx_lane_swap = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); if (rx_lane_swap != 0x1b) { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_RX_LN_SWAP, - (rx_lane_swap | - MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | - MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, + (rx_lane_swap | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); } else { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); } if (tx_lane_swap != 0x1b) { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TX_LN_SWAP, - (tx_lane_swap | - MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, + (tx_lane_swap | + MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); } else { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); } } @@ -1856,66 +1886,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 control2; - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, - &control2); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + &control2); if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; else control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", phy->speed_cap_mask, control2); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, - control2); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + control2); if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { DP(NETIF_MSG_LINK, "XGXS\n"); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); - CL22_RD_OVER_CL45(bp, phy, 
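On the "0123 => 0x1b" comment in bnx2x_set_swap_lanes() above: with two bits per lane and the map written out most-significant lane first, the identity mapping 0,1,2,3 packs to 0x1b, so any other value means the swap has to be enabled. A small sketch of that packing under that reading of the comment (the field order is my interpretation of the comment, not taken from the hardware manual):

#include <stdint.h>
#include <stdio.h>

/* Pack a 4-lane map, two bits per lane, first lane in the top bit pair. */
static uint8_t pack_lane_map(unsigned l0, unsigned l1, unsigned l2, unsigned l3)
{
	return (uint8_t)(((l0 & 3) << 6) | ((l1 & 3) << 4) |
			 ((l2 & 3) << 2) |  (l3 & 3));
}

int main(void)
{
	uint8_t no_swap = pack_lane_map(0, 1, 2, 3);
	uint8_t swapped = pack_lane_map(3, 2, 1, 0);

	/* 0x1b is the "no swap" value the driver tests against. */
	printf("identity = 0x%02x, needs swap: %s\n",
	       no_swap, no_swap != 0x1b ? "yes" : "no");
	printf("reversed = 0x%02x, needs swap: %s\n",
	       swapped, swapped != 0x1b ? "yes" : "no");
	return 0;
}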
- MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, - &control2); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + &control2); control2 |= MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, - control2); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + control2); /* Disable parallel detection of HiG */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); } } static void bnx2x_set_autoneg(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars, - u8 enable_cl73) + struct link_vars *vars, + u8 enable_cl73) { struct bnx2x *bp = params->bp; u16 reg_val; /* CL37 Autoneg */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); /* CL37 Autoneg Enabled */ if (vars->line_speed == SPEED_AUTO_NEG) @@ -1924,15 +1954,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); /* Enable/Disable Autodetection */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; @@ -1941,14 +1971,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, else reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); /* Enable TetonII and BAM autoneg */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_BAM_NEXT_PAGE, - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, ®_val); if (vars->line_speed == SPEED_AUTO_NEG) { /* Enable BAM aneg Mode and TetonII aneg Mode */ @@ -1959,20 +1989,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); } - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_BAM_NEXT_PAGE, - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, - reg_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + reg_val); if (enable_cl73) { /* Enable Cl73 FSM status bits */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_USERB0, - MDIO_CL73_USERB0_CL73_UCTRL, - 0xe); + 
CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_UCTRL, + 0xe); /* Enable BAM Station Manager*/ - CL22_WR_OVER_CL45(bp, phy, + CL45_WR_OVER_CL22(bp, phy, MDIO_REG_BANK_CL73_USERB0, MDIO_CL73_USERB0_CL73_BAM_CTRL1, MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | @@ -1980,10 +2010,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); /* Advertise CL73 link speeds */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV2, - ®_val); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + ®_val); if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; @@ -1991,10 +2021,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV2, - reg_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + reg_val); /* CL73 Autoneg Enabled */ reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; @@ -2002,39 +2032,37 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, } else /* CL73 Autoneg Disabled */ reg_val = 0; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); } /* program SerDes, forced speed */ static void bnx2x_program_serdes(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { struct bnx2x *bp = params->bp; u16 reg_val; /* program duplex, disable autoneg and sgmii*/ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); if (phy->req_duplex == DUPLEX_FULL) reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); - - /* - * program speed - * - needed only if the speed is greater than 1G (2.5G or 10G) - */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_MISC1, ®_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* program speed + - needed only if the speed is greater than 1G (2.5G or 10G) */ + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, ®_val); /* clearing the speed value before setting the right speed */ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); @@ -2055,9 +2083,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; } - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_MISC1, reg_val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, reg_val); } @@ -2074,13 +2102,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy, val |= MDIO_OVER_1G_UP1_2_5G; if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) val |= MDIO_OVER_1G_UP1_10G; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_UP1, val); + 
CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP1, val); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_UP3, 0x400); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP3, 0x400); } static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, @@ -2088,21 +2116,22 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; - /* - * Resolve pause mode and advertisement. - * Please refer to Table 28B-3 of the 802.3ab-1999 spec - */ + /* resolve pause mode and advertisement + * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ switch (phy->req_flow_ctrl) { case BNX2X_FLOW_CTRL_AUTO: - if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - else + if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { + *ieee_fc |= + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + } else { *ieee_fc |= - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + } break; case BNX2X_FLOW_CTRL_TX: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + *ieee_fc |= + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; break; case BNX2X_FLOW_CTRL_RX: @@ -2120,23 +2149,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, struct link_params *params, - u16 ieee_fc) + u16 ieee_fc) { struct bnx2x *bp = params->bp; u16 val; /* for AN, we are always publishing full duplex */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, &val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, &val); val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, val); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, val); } static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, @@ -2150,67 +2179,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, /* Enable and restart BAM/CL37 aneg */ if (enable_cl73) { - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - &mii_control); - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - (mii_control | - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + &mii_control); + + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + (mii_control | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); } else { - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg mii_control before = 0x%x\n", mii_control); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - (mii_control | - 
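On bnx2x_calc_ieee_aneg_adv() and bnx2x_set_ieee_aneg_advertisment() above: the requested flow-control mode is turned into the CL37 pause/asym-pause advertisement bits (AUTO advertises both directions only when req_fc_auto_adv allows it, TX advertises asymmetric only), and the same value is repositioned with a left shift of 3 to land in the CL73 ADV1 pause field. A sketch of that mapping with illustrative bit values; the SK_* names are stand-ins for the MDIO_COMBO_IEEE0_* constants, and the modes not visible in the hunk above are hedged to a placeholder:

#include <stdint.h>
#include <stdio.h>

/* Illustrative advertisement bits (stand-ins for MDIO_COMBO_IEEE0_*). */
#define SK_ADV_PAUSE_ASYM  (1u << 0)
#define SK_ADV_PAUSE_BOTH  (3u << 0)

enum sk_fc { SK_FC_AUTO, SK_FC_TX, SK_FC_OTHER };

static uint16_t calc_cl37_adv(enum sk_fc req, int auto_adv_both)
{
	uint16_t adv = 0;

	switch (req) {
	case SK_FC_AUTO:
		adv |= auto_adv_both ? SK_ADV_PAUSE_BOTH : SK_ADV_PAUSE_ASYM;
		break;
	case SK_FC_TX:
		adv |= SK_ADV_PAUSE_ASYM;
		break;
	default:
		/* Remaining modes are cut off in the hunk above; symmetric
		 * pause is used here purely as a placeholder. */
		adv |= SK_ADV_PAUSE_BOTH;
		break;
	}
	return adv;
}

int main(void)
{
	uint16_t cl37 = calc_cl37_adv(SK_FC_AUTO, 1);
	/* The CL73 ADV1 pause field sits 3 bits higher, hence the <<3 above. */
	uint16_t cl73 = (uint16_t)(cl37 << 3);

	printf("cl37 adv = 0x%x, cl73 adv1 pause bits = 0x%x\n", cl37, cl73);
	return 0;
}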
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | - MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); } } static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { struct bnx2x *bp = params->bp; u16 control1; /* in SGMII mode, the unicore is always slave */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, - &control1); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + &control1); control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; /* set sgmii mode (and not fiber) */ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, - control1); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + control1); /* if forced speed */ if (!(vars->line_speed == SPEED_AUTO_NEG)) { /* set speed, disable autoneg */ u16 mii_control; - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); @@ -2238,10 +2267,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, if (phy->req_duplex == DUPLEX_FULL) mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - mii_control); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + mii_control); } else { /* AN mode */ /* enable and restart AN */ @@ -2256,19 +2285,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) { /* LD LP */ - switch (pause_result) { /* ASYM P ASYM P */ - case 0xb: /* 1 0 1 1 */ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; break; - case 0xe: /* 1 1 1 0 */ + case 0xe: /* 1 1 1 0 */ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; break; - case 0x5: /* 0 1 0 1 */ - case 0x7: /* 0 1 1 1 */ - case 0xd: /* 1 1 0 1 */ - case 0xf: /* 1 1 1 1 */ + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; break; @@ -2288,24 +2317,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, u16 pd_10g, status2_1000x; if (phy->req_line_speed != SPEED_AUTO_NEG) return 0; - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_STATUS2, - &status2_1000x); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_STATUS2, - &status2_1000x); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); if (status2_1000x & 
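On bnx2x_pause_resolve() above: the local and partner PAUSE/ASM_DIR advertisement bits are packed into a 4-bit index (local asym, local pause, partner asym, partner pause, high to low) and resolved per 802.3 Table 28B-3: 0xb gives TX-only, 0xe gives RX-only, and the 0x5/0x7/0xd/0xf group gives both directions. A standalone sketch of the same table; the SK_FC_* result names are stand-ins for the BNX2X_FLOW_CTRL_* values:

#include <stdio.h>
#include <stdint.h>

enum sk_fc { SK_FC_NONE = 0, SK_FC_RX = 1, SK_FC_TX = 2, SK_FC_BOTH = 3 };

/* pause_result bit layout, mirroring the comment in bnx2x_pause_resolve():
 *   bit3 = local ASM_DIR, bit2 = local PAUSE,
 *   bit1 = partner ASM_DIR, bit0 = partner PAUSE. */
static enum sk_fc resolve_pause(uint8_t pause_result)
{
	switch (pause_result) {
	case 0xb:			/* 1 0  1 1 */
		return SK_FC_TX;
	case 0xe:			/* 1 1  1 0 */
		return SK_FC_RX;
	case 0x5:			/* 0 1  0 1 */
	case 0x7:			/* 0 1  1 1 */
	case 0xd:			/* 1 1  0 1 */
	case 0xf:			/* 1 1  1 1 */
		return SK_FC_BOTH;
	default:
		return SK_FC_NONE;
	}
}

int main(void)
{
	printf("0xb -> %d, 0xe -> %d, 0x7 -> %d, 0x0 -> %d\n",
	       resolve_pause(0xb), resolve_pause(0xe),
	       resolve_pause(0x7), resolve_pause(0x0));
	return 0;
}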
MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", params->port); return 1; } - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, - &pd_10g); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, + &pd_10g); if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", @@ -2344,14 +2373,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, - &ld_pause); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_LP_ADV1, - &lp_pause); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, + &ld_pause); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV1, + &lp_pause); pause_result = (ld_pause & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8; @@ -2361,18 +2390,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result); } else { - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, - &ld_pause); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, - &lp_pause); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, + &ld_pause); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, + &lp_pause); pause_result = (ld_pause & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; pause_result |= (lp_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result); } @@ -2388,25 +2417,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, u16 rx_status, ustat_val, cl37_fsm_recieved; DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); /* Step 1: Make sure signal is detected */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_RX0, - MDIO_RX0_RX_STATUS, - &rx_status); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_RX0, + MDIO_RX0_RX_STATUS, + &rx_status); if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != (MDIO_RX0_RX_STATUS_SIGDET)) { DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 
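On bnx2x_flow_ctrl_resolve() above: the 4-bit pause_result fed to the table sketched earlier is assembled by shifting the local advertisement's pause bits into bits [3:2] and the partner's into bits [1:0]; in the CL37 branch those are the visible >>5 and >>7 shifts. A minimal sketch of that packing, using an illustrative pause-bit mask in place of MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK:

#include <stdint.h>
#include <stdio.h>

/* Illustrative CL37 pause mask: two pause bits at positions 7 and 8, so
 * >>5 drops the local bits into [3:2] and >>7 drops the partner's into
 * [1:0], matching the shifts in the CL37 branch above. */
#define SK_CL37_PAUSE_MASK 0x0180u

static uint8_t build_pause_result(uint16_t ld_adv, uint16_t lp_adv)
{
	uint8_t pause_result;

	pause_result  = (uint8_t)((ld_adv & SK_CL37_PAUSE_MASK) >> 5);
	pause_result |= (uint8_t)((lp_adv & SK_CL37_PAUSE_MASK) >> 7);
	return pause_result;
}

int main(void)
{
	/* Local advertises asym only, partner advertises both. */
	uint16_t ld = 1u << 8, lp = (1u << 7) | (1u << 8);

	printf("pause_result = 0x%x\n", build_pause_result(ld, lp));
	return 0;
}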
"rx_status(0x80b0) = 0x%x\n", rx_status); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); return; } /* Step 2: Check CL73 state machine */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_USERB0, - MDIO_CL73_USERB0_CL73_USTAT1, - &ustat_val); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_USTAT1, + &ustat_val); if ((ustat_val & (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != @@ -2416,14 +2445,12 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, "ustat_val(0x8371) = 0x%x\n", ustat_val); return; } - /* - * Step 3: Check CL37 Message Pages received to indicate LP - * supports only CL37 - */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_REMOTE_PHY, - MDIO_REMOTE_PHY_MISC_RX_STATUS, - &cl37_fsm_recieved); + /* Step 3: Check CL37 Message Pages received to indicate LP + supports only CL37 */ + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_REMOTE_PHY, + MDIO_REMOTE_PHY_MISC_RX_STATUS, + &cl37_fsm_recieved); if ((cl37_fsm_recieved & (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != @@ -2434,18 +2461,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, cl37_fsm_recieved); return; } - /* - * The combined cl37/cl73 fsm state information indicating that - * we are connected to a device which does not support cl73, but - * does support cl37 BAM. In this case we disable cl73 and - * restart cl37 auto-neg - */ - + /* The combined cl37/cl73 fsm state information indicating that we are + connected to a device which does not support cl73, but does support + cl37 BAM. 
In this case we disable cl73 and restart cl37 auto-neg */ /* Disable CL73 */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - 0); + CL45_WR_OVER_CL22(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + 0); /* Restart CL37 autoneg */ bnx2x_restart_autoneg(phy, params, 0); DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); @@ -2470,14 +2493,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 new_line_speed, gp_status; + u16 new_line_speed , gp_status; u8 rc = 0; /* Read gp_status */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); if (phy->req_line_speed == SPEED_AUTO_NEG) vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; @@ -2614,9 +2637,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) u16 bank; /* read precomp */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_LP_UP2, &lp_up2); + CL45_RD_OVER_CL22(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP2, &lp_up2); /* bits [10:7] at lp_up2, positioned at [15:12] */ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> @@ -2628,18 +2651,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { - CL22_RD_OVER_CL45(bp, phy, - bank, - MDIO_TX0_TX_DRIVER, &tx_driver); + CL45_RD_OVER_CL22(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, &tx_driver); /* replace tx_driver bits [15:12] */ if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; tx_driver |= lp_up2; - CL22_WR_OVER_CL45(bp, phy, - bank, - MDIO_TX0_TX_DRIVER, tx_driver); + CL45_WR_OVER_CL22(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, tx_driver); } } } @@ -2653,10 +2676,10 @@ static u8 bnx2x_emac_program(struct link_params *params, DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + - EMAC_REG_EMAC_MODE, - (EMAC_MODE_25G_MODE | - EMAC_MODE_PORT_MII_10M | - EMAC_MODE_HALF_DUPLEX)); + EMAC_REG_EMAC_MODE, + (EMAC_MODE_25G_MODE | + EMAC_MODE_PORT_MII_10M | + EMAC_MODE_HALF_DUPLEX)); switch (vars->line_speed) { case SPEED_10: mode |= EMAC_MODE_PORT_MII_10M; @@ -2684,8 +2707,8 @@ static u8 bnx2x_emac_program(struct link_params *params, if (vars->duplex == DUPLEX_HALF) mode |= EMAC_MODE_HALF_DUPLEX; bnx2x_bits_en(bp, - GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, - mode); + GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, + mode); bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); return 0; @@ -2700,7 +2723,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { - CL22_WR_OVER_CL45(bp, phy, + CL45_WR_OVER_CL22(bp, phy, bank, MDIO_RX0_RX_EQ_BOOST, phy->rx_preemphasis[i]); @@ -2708,7 +2731,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { - CL22_WR_OVER_CL45(bp, phy, + CL45_WR_OVER_CL22(bp, phy, bank, MDIO_TX0_TX_DRIVER, phy->tx_preemphasis[i]); @@ -2731,7 +2754,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, /* forced speed 
requested? */ if (vars->line_speed != SPEED_AUTO_NEG || (SINGLE_MEDIA_DIRECT(params) && - params->loopback_mode == LOOPBACK_EXT)) { + params->loopback_mode == LOOPBACK_EXT)) { DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); /* disable autoneg */ @@ -2748,7 +2771,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, /* program duplex & pause advertisement (for aneg) */ bnx2x_set_ieee_aneg_advertisment(phy, params, - vars->ieee_fc); + vars->ieee_fc); /* enable autoneg */ bnx2x_set_autoneg(phy, params, vars, enable_cl73); @@ -2819,8 +2842,7 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy, } static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, - struct bnx2x_phy *phy, - struct link_params *params) + struct bnx2x_phy *phy) { u16 cnt, ctrl; /* Wait for soft reset to get cleared upto 1 sec */ @@ -2831,11 +2853,6 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, break; msleep(1); } - - if (cnt == 1000) - netdev_err(bp->dev, "Warning: PHY was not initialized," - " Port %d\n", - params->port); DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); return cnt; } @@ -2846,7 +2863,9 @@ static void bnx2x_link_int_enable(struct link_params *params) u32 mask; struct bnx2x *bp = params->bp; - /* Setting the status to report on link up for either XGXS or SerDes */ + /* setting the status to report on link up + for either XGXS or SerDes */ + if (params->switch_cfg == SWITCH_CFG_10G) { mask = (NIG_MASK_XGXS0_LINK10G | NIG_MASK_XGXS0_LINK_STATUS); @@ -2889,7 +2908,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, { u32 latch_status = 0; - /* + /** * Disable the MI INT ( external phy int ) by writing 1 to the * status register. Link down indication is high-active-signal, * so in this case we need to write the status to clear the XOR @@ -2914,30 +2933,27 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, /* For all latched-signal=up : Re-Arm Latch signals */ REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, - (latch_status & 0xfffe) | (latch_status & 1)); + (latch_status & 0xfffe) | (latch_status & 1)); } /* For all latched-signal=up,Write original_signal to status */ } static void bnx2x_link_int_ack(struct link_params *params, - struct link_vars *vars, u8 is_10g) + struct link_vars *vars, u8 is_10g) { struct bnx2x *bp = params->bp; u8 port = params->port; - /* - * First reset all status we assume only one line will be - * change at a time - */ + /* first reset all status + * we assume only one line will be change at a time */ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - (NIG_STATUS_XGXS0_LINK10G | - NIG_STATUS_XGXS0_LINK_STATUS | - NIG_STATUS_SERDES0_LINK_STATUS)); + (NIG_STATUS_XGXS0_LINK10G | + NIG_STATUS_XGXS0_LINK_STATUS | + NIG_STATUS_SERDES0_LINK_STATUS)); if (vars->phy_link_up) { if (is_10g) { - /* - * Disable the 10G link interrupt by writing 1 to the - * status register + /* Disable the 10G link interrupt + * by writing 1 to the status register */ DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); bnx2x_bits_en(bp, @@ -2945,9 +2961,9 @@ static void bnx2x_link_int_ack(struct link_params *params, NIG_STATUS_XGXS0_LINK10G); } else if (params->switch_cfg == SWITCH_CFG_10G) { - /* - * Disable the link interrupt by writing 1 to the - * relevant lane in the status register + /* Disable the link interrupt + * by writing 1 to the relevant lane + * in the status register */ u32 ser_lane = ((params->lane_config & PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> @@ -2962,9 +2978,8 @@ static void bnx2x_link_int_ack(struct link_params *params, } else { 
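On bnx2x_wait_reset_complete() above: the PHY control register is polled once a millisecond for up to a second until the self-clearing soft-reset bit (bit 15) drops, and the number of polls is what the driver logs. A runnable sketch of that poll against a simulated register; phy_ctrl_read()/sleep_ms() are invented stand-ins for bnx2x_cl45_read() of MDIO_PMA_REG_CTRL and msleep():

#include <stdint.h>
#include <stdio.h>

#define SK_CTRL_RESET (1u << 15)   /* self-clearing soft-reset bit */

static int remaining_busy_polls = 3;   /* simulate reset clearing after 3 ms */

static uint16_t phy_ctrl_read(void)    /* stand-in for a CL45 read of CTRL */
{
	if (remaining_busy_polls > 0) {
		remaining_busy_polls--;
		return SK_CTRL_RESET;
	}
	return 0;
}

static void sleep_ms(unsigned ms) { (void)ms; }   /* stand-in for msleep() */

/* Returns the number of 1 ms polls it took, or 1000 on timeout. */
static unsigned wait_reset_complete(void)
{
	unsigned cnt;
	uint16_t ctrl;

	for (cnt = 0; cnt < 1000; cnt++) {
		ctrl = phy_ctrl_read();
		if (!(ctrl & SK_CTRL_RESET))
			break;
		sleep_ms(1);
	}
	return cnt;
}

int main(void)
{
	printf("reset cleared after %u ms\n", wait_reset_complete());
	return 0;
}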
/* SerDes */ DP(NETIF_MSG_LINK, "SerDes phy link up\n"); - /* - * Disable the link interrupt by writing 1 to the status - * register + /* Disable the link interrupt + * by writing 1 to the status register */ bnx2x_bits_en(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, @@ -3044,7 +3059,8 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, } if ((params->num_phys == MAX_PHYS) && (params->phy[EXT_PHY2].ver_addr != 0)) { - spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); + spirom_ver = REG_RD(bp, + params->phy[EXT_PHY2].ver_addr); if (params->phy[EXT_PHY2].format_fw_ver) { *ver_p = '/'; ver_p++; @@ -3073,27 +3089,29 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, /* change the uni_phy_addr in the nig */ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + - port*0x18)); + port*0x18)); REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); bnx2x_cl45_write(bp, phy, - 5, - (MDIO_REG_BANK_AER_BLOCK + - (MDIO_AER_BLOCK_AER_REG & 0xf)), - 0x2800); + 5, + (MDIO_REG_BANK_AER_BLOCK + + (MDIO_AER_BLOCK_AER_REG & 0xf)), + 0x2800); bnx2x_cl45_write(bp, phy, - 5, - (MDIO_REG_BANK_CL73_IEEEB0 + - (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), - 0x6041); + 5, + (MDIO_REG_BANK_CL73_IEEEB0 + + (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), + 0x6041); msleep(200); /* set aer mmd back */ bnx2x_set_aer_mmd_xgxs(params, phy); /* and md_devad */ - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + md_devad); + } else { u16 mii_ctrl; DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); @@ -3134,26 +3152,26 @@ u8 bnx2x_set_led(struct link_params *params, case LED_MODE_OFF: REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - SHARED_HW_CFG_LED_MAC1); + SHARED_HW_CFG_LED_MAC1); tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); break; case LED_MODE_OPER: - /* + /** * For all other phys, OPER mode is same as ON, so in case * link is down, do nothing - */ + **/ if (!vars->link_up) break; case LED_MODE_ON: if (params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 && CHIP_IS_E2(bp) && params->num_phys == 2) { - /* - * This is a work-around for E2+8727 Configurations - */ + /** + * This is a work-around for E2+8727 Configurations + */ if (mode == LED_MODE_ON || speed == SPEED_10000){ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); @@ -3165,40 +3183,41 @@ u8 bnx2x_set_led(struct link_params *params, return rc; } } else if (SINGLE_MEDIA_DIRECT(params)) { - /* - * This is a work-around for HW issue found when link - * is up in CL73 - */ + /** + * This is a work-around for HW issue found when link + * is up in CL73 + */ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); } else { - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, + hw_led_mode); } - REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); + REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + + port*4, 0); /* Set blinking rate to ~15.9Hz */ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, - LED_BLINK_RATE_VAL); + LED_BLINK_RATE_VAL); REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + - port*4, 1); + port*4, 1); tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE))); + EMAC_WR(bp, EMAC_REG_EMAC_LED, + (tmp & (~EMAC_LED_OVERRIDE))); if (CHIP_IS_E1(bp) && ((speed == SPEED_2500) || (speed == SPEED_1000) || (speed == 
SPEED_100) || (speed == SPEED_10))) { - /* - * On Everest 1 Ax chip versions for speeds less than - * 10G LED scheme is different - */ + /* On Everest 1 Ax chip versions for speeds less than + 10G LED scheme is different */ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 - + port*4, 1); + + port*4, 1); REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + - port*4, 0); + port*4, 0); REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + - port*4, 1); + port*4, 1); } break; @@ -3212,7 +3231,7 @@ u8 bnx2x_set_led(struct link_params *params, } -/* +/** * This function comes to reflect the actual link state read DIRECTLY from the * HW */ @@ -3224,10 +3243,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, u8 ext_phy_link_up = 0, serdes_phy_type; struct link_vars temp_vars; - CL22_RD_OVER_CL45(bp, ¶ms->phy[INT_PHY], - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); + CL45_RD_OVER_CL22(bp, ¶ms->phy[INT_PHY], + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); /* link is up only if both local phy and external phy are up */ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) return -ESRCH; @@ -3271,15 +3290,15 @@ static u8 bnx2x_link_initialize(struct link_params *params, u8 rc = 0; u8 phy_index, non_ext_phy; struct bnx2x *bp = params->bp; - /* - * In case of external phy existence, the line speed would be the - * line speed linked up by the external phy. In case it is direct - * only, then the line_speed during initialization will be - * equal to the req_line_speed - */ + /** + * In case of external phy existence, the line speed would be the + * line speed linked up by the external phy. In case it is direct + * only, then the line_speed during initialization will be + * equal to the req_line_speed + */ vars->line_speed = params->phy[INT_PHY].req_line_speed; - /* + /** * Initialize the internal phy in case this is a direct board * (no external phys), or this board has external phy which requires * to first. @@ -3307,16 +3326,17 @@ static u8 bnx2x_link_initialize(struct link_params *params, if (!non_ext_phy) for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { - /* + /** * No need to initialize second phy in case of first * phy only selection. In case of second phy, we do * need to initialize the first phy, since they are * connected. 
- */ + **/ if (phy_index == EXT_PHY2 && (bnx2x_phy_selection(params) == PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { - DP(NETIF_MSG_LINK, "Ignoring second phy\n"); + DP(NETIF_MSG_LINK, "Not initializing" + "second phy\n"); continue; } params->phy[phy_index].config_init( @@ -3338,8 +3358,9 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy, struct link_params *params) { /* reset the SerDes/XGXS */ - REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, - (0x1ff << (params->port*16))); + REG_WR(params->bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_3_CLEAR, + (0x1ff << (params->port*16))); } static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, @@ -3353,11 +3374,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, else gpio_port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); DP(NETIF_MSG_LINK, "reset external PHY\n"); } @@ -3388,8 +3409,9 @@ static u8 bnx2x_update_link_down(struct link_params *params, /* reset BigMac */ bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); return 0; } @@ -3440,7 +3462,7 @@ static u8 bnx2x_update_link_up(struct link_params *params, msleep(20); return rc; } -/* +/** * The bnx2x_link_update function should be called upon link * interrupt. * Link is considered up as follows: @@ -3479,11 +3501,12 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + - port*0x18) > 0); + port*0x18) > 0); DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), is_mi_int, - REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); + REG_RD(bp, + NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), @@ -3492,14 +3515,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) /* disable emac */ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - /* - * Step 1: - * Check external link change only for external phys, and apply - * priority selection between them in case the link on both phys - * is up. Note that the instead of the common vars, a temporary - * vars argument is used since each phy may have different link/ - * speed/duplex result - */ + /** + * Step 1: + * Check external link change only for external phys, and apply + * priority selection between them in case the link on both phys + * is up. 
Note that the instead of the common vars, a temporary + * vars argument is used since each phy may have different link/ + * speed/duplex result + */ for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { struct bnx2x_phy *phy = ¶ms->phy[phy_index]; @@ -3524,22 +3547,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) switch (bnx2x_phy_selection(params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - /* + /** * In this option, the first PHY makes sure to pass the * traffic through itself only. * Its not clear how to reset the link on the second phy - */ + **/ active_external_phy = EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - /* + /** * In this option, the first PHY makes sure to pass the * traffic through the second PHY. - */ + **/ active_external_phy = EXT_PHY2; break; default: - /* + /** * Link indication on both PHYs with the following cases * is invalid: * - FIRST_PHY means that second phy wasn't initialized, @@ -3547,7 +3570,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) * - SECOND_PHY means that first phy should not be able * to link up by itself (using configuration) * - DEFAULT should be overriden during initialiazation - */ + **/ DP(NETIF_MSG_LINK, "Invalid link indication" "mpc=0x%x. DISABLING LINK !!!\n", params->multi_phy_config); @@ -3557,18 +3580,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) } } prev_line_speed = vars->line_speed; - /* - * Step 2: - * Read the status of the internal phy. In case of - * DIRECT_SINGLE_MEDIA board, this link is the external link, - * otherwise this is the link between the 577xx and the first - * external phy - */ + /** + * Step 2: + * Read the status of the internal phy. In case of + * DIRECT_SINGLE_MEDIA board, this link is the external link, + * otherwise this is the link between the 577xx and the first + * external phy + */ if (params->phy[INT_PHY].read_status) params->phy[INT_PHY].read_status( ¶ms->phy[INT_PHY], params, vars); - /* + /** * The INT_PHY flow control reside in the vars. This include the * case where the speed or flow control are not set to AUTO. * Otherwise, the active external phy flow control result is set @@ -3578,13 +3601,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) */ if (active_external_phy > INT_PHY) { vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; - /* + /** * Link speed is taken from the XGXS. AN and FC result from * the external phy. */ vars->link_status |= phy_vars[active_external_phy].link_status; - /* + /** * if active_external_phy is first PHY and link is up - disable * disable TX on second external PHY */ @@ -3620,7 +3643,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," " ext_phy_line_speed = %d\n", vars->flow_ctrl, vars->link_status, ext_phy_line_speed); - /* + /** * Upon link speed change set the NIG into drain mode. 
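On the dual-PHY arbitration above: when both external PHYs report link, the active one is chosen from the multi_phy_config selection, with HARDWARE_DEFAULT and FIRST_PHY_PRIORITY picking the first PHY, SECOND_PHY_PRIORITY picking the second, and anything else treated as an invalid indication that disables the link. A sketch of that decision; the SK_* names are local stand-ins for PORT_HW_CFG_PHY_SELECTION_* and the EXT_PHY1/EXT_PHY2 indices, and the single-link cases are a simplification of the loop above:

#include <stdio.h>

enum sk_sel {
	SK_SEL_HARDWARE_DEFAULT,
	SK_SEL_FIRST_PHY,
	SK_SEL_FIRST_PHY_PRIORITY,
	SK_SEL_SECOND_PHY,
	SK_SEL_SECOND_PHY_PRIORITY,
};

enum { SK_EXT_PHY1 = 1, SK_EXT_PHY2 = 2, SK_NO_PHY = -1 };

static int pick_active_ext_phy(int phy1_up, int phy2_up, enum sk_sel sel)
{
	if (phy1_up && !phy2_up)
		return SK_EXT_PHY1;
	if (phy2_up && !phy1_up)
		return SK_EXT_PHY2;
	if (!phy1_up && !phy2_up)
		return SK_NO_PHY;

	/* Both links are up: arbitrate by the configured selection. */
	switch (sel) {
	case SK_SEL_HARDWARE_DEFAULT:
	case SK_SEL_FIRST_PHY_PRIORITY:
		return SK_EXT_PHY1;
	case SK_SEL_SECOND_PHY_PRIORITY:
		return SK_EXT_PHY2;
	default:
		/* FIRST_PHY/SECOND_PHY alone should never see both links up;
		 * the driver logs this and disables the link. */
		return SK_NO_PHY;
	}
}

int main(void)
{
	printf("both up, 2nd priority -> phy %d\n",
	       pick_active_ext_phy(1, 1, SK_SEL_SECOND_PHY_PRIORITY));
	return 0;
}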
Comes to * deals with possible FIFO glitch due to clk change when speed * is decreased without link down indicator @@ -3635,8 +3658,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) ext_phy_line_speed); vars->phy_link_up = 0; } else if (prev_line_speed != vars->line_speed) { - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, - 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + + params->port*4, 0); msleep(1); } } @@ -3651,14 +3674,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) bnx2x_link_int_ack(params, vars, link_10g); - /* - * In case external phy link is up, and internal link is down - * (not initialized yet probably after link initialization, it - * needs to be initialized. - * Note that after link down-up as result of cable plug, the xgxs - * link would probably become up again without the need - * initialize it - */ + /** + * In case external phy link is up, and internal link is down + * (not initialized yet probably after link initialization, it + * needs to be initialized. + * Note that after link down-up as result of cable plug, the xgxs + * link would probably become up again without the need + * initialize it + */ if (!(SINGLE_MEDIA_DIRECT(params))) { DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d," " init_preceding = %d\n", ext_phy_link_up, @@ -3678,9 +3701,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) vars); } } - /* - * Link is up only if both local phy and external phy (in case of - * non-direct board) are up + /** + * Link is up only if both local phy and external phy (in case of + * non-direct board) are up */ vars->link_up = (vars->phy_link_up && (ext_phy_link_up || @@ -3701,10 +3724,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) { bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); msleep(1); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); } static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, @@ -3724,9 +3747,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u16 fw_ver1, fw_ver2; bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER1, &fw_ver1); + MDIO_PMA_REG_ROM_VER1, &fw_ver1); bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, &fw_ver2); + MDIO_PMA_REG_ROM_VER2, &fw_ver2); bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); } @@ -3747,7 +3770,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { - val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; } if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == @@ -3778,11 +3801,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { ret = 1; bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_ADV_PAUSE, &ld_pause); + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); pause_result = (ld_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; pause_result |= (lp_pause & @@ -3858,31 +3881,31 @@ static u8 
bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, /* Boot port from external ROM */ /* EDC grst */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - 0x0001); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x0001); /* ucode reboot and rst */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - 0x008c); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x008c); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0001); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); /* Reset internal microprocessor */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); /* Release srst bit */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); /* Delay 100ms per the PHY specifications */ msleep(100); @@ -3913,8 +3936,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, /* Clear ser_boot_ctl bit */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0000); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); bnx2x_save_bcm_spirom_ver(bp, phy, port); DP(NETIF_MSG_LINK, @@ -3925,6 +3948,48 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, return rc; } +static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp, + struct bnx2x_phy *phy) +{ + u16 val; + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val == 0) { + /* Mustn't set low power mode in 8073 A0 */ + return; + } + + /* Disable PLL sequencer (use read-modify-write to clear bit 13) */ + bnx2x_cl45_read(bp, phy, + MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); + val &= ~(1<<13); + bnx2x_cl45_write(bp, phy, + MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); + + /* PLL controls */ + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490); + + /* Tx Controls */ + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640); + + /* Rx Controls */ + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015); + + /* Enable PLL sequencer (use read-modify-write to set bit 13) */ + bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); + val |= (1<<13); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); +} + /******************************************************************/ /* BCM8073 PHY SECTION */ /******************************************************************/ @@ -3935,8 +4000,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) /* Read 8073 HW revision*/ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_CHIP_REV, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); if (val != 1) { /* No need to workaround in 8073 A1 */ @@ -3944,8 +4009,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) } bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - 
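On the added bnx2x_8073_set_xaui_low_power_mode() above: the PLL sequencer is parked by a read-modify-write that clears bit 13 of its XS-devad register, a list of PLL/Tx/Rx tuning registers is programmed, and the same bit is set again to restart the sequencer (the whole thing is skipped on the A0 revision, chip rev 0). A sketch of the read-modify-write step against a simulated register; xs_read()/xs_write() are stand-ins for the clause 45 accessors and the low bits of the initial value are arbitrary:

#include <stdint.h>
#include <stdio.h>

static uint16_t fake_pll_sequencer = 0x2055;   /* bit 13 starts set */

static uint16_t xs_read(void)          { return fake_pll_sequencer; }
static void     xs_write(uint16_t val) { fake_pll_sequencer = val; }

static void set_pll_sequencer(int enable)
{
	uint16_t val = xs_read();          /* read-modify-write of bit 13 */

	if (enable)
		val |= (1u << 13);
	else
		val &= (uint16_t)~(1u << 13);
	xs_write(val);
}

int main(void)
{
	set_pll_sequencer(0);              /* park the sequencer             */
	/* ... program the PLL / Tx / Rx control registers here ...          */
	set_pll_sequencer(1);              /* restart it with the new values */

	printf("sequencer reg = 0x%04x\n", fake_pll_sequencer);
	return 0;
}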
MDIO_PMA_REG_ROM_VER2, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &val); /* SNR should be applied only for version 0x102 */ if (val != 0x102) @@ -3959,8 +4024,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) u16 val, cnt, cnt1 ; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_CHIP_REV, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); if (val > 0) { /* No need to workaround in 8073 A1 */ @@ -3968,32 +4033,26 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) } /* XAUI workaround in 8073 A0: */ - /* - * After loading the boot ROM and restarting Autoneg, poll - * Dev1, Reg $C820: - */ + /* After loading the boot ROM and restarting Autoneg, + poll Dev1, Reg $C820: */ for (cnt = 0; cnt < 1000; cnt++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_SPEED_LINK_STATUS, - &val); - /* - * If bit [14] = 0 or bit [13] = 0, continue on with - * system initialization (XAUI work-around not required, as - * these bits indicate 2.5G or 1G link up). - */ + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &val); + /* If bit [14] = 0 or bit [13] = 0, continue on with + system initialization (XAUI work-around not required, + as these bits indicate 2.5G or 1G link up). */ if (!(val & (1<<14)) || !(val & (1<<13))) { DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); return 0; } else if (!(val & (1<<15))) { - DP(NETIF_MSG_LINK, "bit 15 went off\n"); - /* - * If bit 15 is 0, then poll Dev1, Reg $C841 until it's - * MSB (bit15) goes to 1 (indicating that the XAUI - * workaround has completed), then continue on with - * system initialization. - */ + DP(NETIF_MSG_LINK, "clc bit 15 went off\n"); + /* If bit 15 is 0, then poll Dev1, Reg $C841 until + it's MSB (bit 15) goes to 1 (indicating that the + XAUI workaround has completed), + then continue on with system initialization.*/ for (cnt1 = 0; cnt1 < 1000; cnt1++) { bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -4076,10 +4135,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, gpio_port = params->port; /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); /* enable LASI */ bnx2x_cl45_write(bp, phy, @@ -4089,6 +4148,8 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_8073_set_pause_cl37(params, phy, vars); + bnx2x_8073_set_xaui_low_power_mode(bp, phy); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); @@ -4097,6 +4158,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); + /** + * If this is forced speed, set to KR or KX (all other are not + * supported) + */ /* Swap polarity if required - Must be done only in non-1G mode */ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { /* Configure the 8073 to swap _P and _N of the KR lines */ @@ -4139,10 +4204,8 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, val = (1<<7); } else if (phy->req_line_speed == SPEED_2500) { val = (1<<5); - /* - * Note that 2.5G works only when used with 1G - * advertisment - */ + /* Note that 2.5G works only + when used with 1G advertisment */ } else val = (1<<5); } else { @@ -4151,7 +4214,8 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) val |= 
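On bnx2x_8073_xaui_wa() above: after the boot ROM loads and autoneg restarts, Dev1 register 0xC820 is polled; if bit 14 or bit 13 is clear the link is 1G/2.5G and no workaround is needed, otherwise once bit 15 drops the code polls 0xC841 until its MSB goes high, signalling that the XAUI workaround completed. A condensed sketch of that two-stage poll; reg_c820()/reg_c841() are invented stand-ins for the clause 45 reads, driven here by a trivial simulation:

#include <stdint.h>
#include <stdio.h>

static int polls;                       /* crude simulation counter */

static uint16_t reg_c820(void)          /* stand-in for CL45 read of 0xC820 */
{
	/* bits 15..13 set: 10G link, workaround still running... */
	return (polls++ < 2) ? 0xe000 : 0x6000;   /* ...then bit 15 drops */
}

static uint16_t reg_c841(void)          /* stand-in for CL45 read of 0xC841 */
{
	return (polls++ < 5) ? 0x0000 : 0x8000;   /* MSB rises when done */
}

static int xaui_workaround(void)
{
	int cnt, cnt1;

	for (cnt = 0; cnt < 1000; cnt++) {
		uint16_t val = reg_c820();

		if (!(val & (1 << 14)) || !(val & (1 << 13)))
			return 0;        /* 1G/2.5G link: nothing to do */
		if (!(val & (1 << 15))) {
			/* Stage 2: wait for 0xC841 MSB to signal completion. */
			for (cnt1 = 0; cnt1 < 1000; cnt1++) {
				if (reg_c841() & (1 << 15))
					return 0;
			}
			return -1;
		}
	}
	return -1;
}

int main(void)
{
	printf("workaround result: %d\n", xaui_workaround());
	return 0;
}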
(1<<7); - /* Note that 2.5G works only when used with 1G advertisment */ + /* Note that 2.5G works only when + used with 1G advertisment */ if (phy->speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) @@ -4191,11 +4255,9 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, /* Add support for CL37 (passive mode) III */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); - /* - * The SNR will improve about 2db by changing BW and FEE main - * tap. Rest commands are executed after link is up - * Change FFE main cursor to 5 in EDC register - */ + /* The SNR will improve about 2db by changing + BW and FEE main tap. Rest commands are executed + after link is up*/ if (bnx2x_8073_is_snr_needed(bp, phy)) bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, @@ -4279,11 +4341,12 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { - /* - * The SNR will improve about 2dbby changing the BW and FEE main - * tap. The 1st write to change FFE main tap is set before - * restart AN. Change PLL Bandwidth in EDC register - */ + /* The SNR will improve about 2dbby + changing the BW and FEE main tap.*/ + /* The 1st write to change FFE main + tap is set before restart AN */ + /* Change PLL Bandwidth in EDC + register */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, 0x26BC); @@ -4327,10 +4390,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); - /* - * Set bit 3 to invert Rx in 1G mode and clear this bit - * when it`s in 10G mode. - */ + /** + * Set bit 3 to invert Rx in 1G mode and clear this bit + * when it`s in 10G mode. 
+ */ if (vars->line_speed == SPEED_1000) { DP(NETIF_MSG_LINK, "Swapping 1G polarity for" "the 8073\n"); @@ -4362,8 +4425,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", gpio_port); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); } /******************************************************************/ @@ -4377,11 +4440,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "init 8705\n"); /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); - bnx2x_wait_reset_complete(bp, phy, params); + bnx2x_wait_reset_complete(bp, phy); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); @@ -4432,79 +4495,35 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, /******************************************************************/ /* SFP+ module Section */ /******************************************************************/ -static u8 bnx2x_get_gpio_port(struct link_params *params) -{ - u8 gpio_port; - u32 swap_val, swap_override; - struct bnx2x *bp = params->bp; - if (CHIP_IS_E2(bp)) - gpio_port = BP_PATH(bp); - else - gpio_port = params->port; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - return gpio_port ^ (swap_val && swap_override); -} -static void bnx2x_sfp_set_transmitter(struct link_params *params, +static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 port, u8 tx_en) { u16 val; - u8 port = params->port; - struct bnx2x *bp = params->bp; - u32 tx_en_mode; + DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n", + tx_en, port); /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ - tx_en_mode = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].sfp_ctrl)) & - PORT_HW_CFG_TX_LASER_MASK; - DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x " - "mode = %x\n", tx_en, port, tx_en_mode); - switch (tx_en_mode) { - case PORT_HW_CFG_TX_LASER_MDIO: - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - &val); - - if (tx_en) - val &= ~(1<<15); - else - val |= (1<<15); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - val); - break; - case PORT_HW_CFG_TX_LASER_GPIO0: - case PORT_HW_CFG_TX_LASER_GPIO1: - case PORT_HW_CFG_TX_LASER_GPIO2: - case PORT_HW_CFG_TX_LASER_GPIO3: - { - u16 gpio_pin; - u8 gpio_port, gpio_mode; - if (tx_en) - gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH; - else - gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; + if (tx_en) + val &= ~(1<<15); + else + val |= (1<<15); - gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; - gpio_port = bnx2x_get_gpio_port(params); - bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); - break; - } - default: - DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode); - break; - } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + val); } static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u16 addr, u8 
byte_cnt, u8 *o_buf) { struct bnx2x *bp = params->bp; u16 val = 0; @@ -4517,23 +4536,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Set the read command byte count */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - (byte_cnt | 0xa000)); + (byte_cnt | 0xa000)); /* Set the read command address */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, - addr); + addr); /* Activate read command */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - 0x2c0f); + 0x2c0f); /* Wait up to 500us for command complete status */ for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) break; @@ -4551,15 +4570,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Read the buffer */ for (i = 0; i < byte_cnt; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); } for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; @@ -4570,7 +4589,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u16 addr, u8 byte_cnt, u8 *o_buf) { struct bnx2x *bp = params->bp; u16 val, i; @@ -4583,43 +4602,41 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Need to read from 1.8000 to clear it */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + &val); /* Set the read command byte count */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - ((byte_cnt < 2) ? 2 : byte_cnt)); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + ((byte_cnt < 2) ? 
2 : byte_cnt)); /* Set the read command address */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, - addr); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); /* Set the destination address */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - 0x8004, - MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); + MDIO_PMA_DEVAD, + 0x8004, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); /* Activate read command */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - 0x8002); - /* - * Wait appropriate time for two-wire command to finish before - * polling the status register - */ + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x8002); + /* Wait appropriate time for two-wire command to finish before + polling the status register */ msleep(1); /* Wait up to 500us for command complete status */ for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) break; @@ -4631,21 +4648,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Got bad status 0x%x when reading from SFP+ EEPROM\n", (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); - return -EFAULT; + return -EINVAL; } /* Read the buffer */ for (i = 0; i < byte_cnt; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); } for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; @@ -4655,22 +4672,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, return -EINVAL; } -u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf) +static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u16 addr, + u8 byte_cnt, u8 *o_buf) { if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); + byte_cnt, o_buf); else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); + byte_cnt, o_buf); return -EINVAL; } static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, struct link_params *params, - u16 *edc_mode) + u16 *edc_mode) { struct bnx2x *bp = params->bp; u8 val, check_limiting_mode = 0; @@ -4691,10 +4708,8 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, { u8 copper_module_type; - /* - * Check if its active cable (includes SFP+ module) - * of passive cable - */ + /* Check if its active cable( includes SFP+ module) + of passive cable*/ if (bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_FC_TX_TECH_ADDR, @@ -4753,10 +4768,8 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); return 0; } -/* - * This function read the relevant field from the module (SFP+), and verify it - * is compliant with this board - */ +/* This function read the relevant field from the module ( SFP+ ), + and verify it is compliant with this board */ static u8 
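Both the 8726 and 8727 EEPROM hunks above follow the same two-wire read handshake: program the byte count and address, fire the command, poll the control register for a "complete" status, drain the data buffer, then wait for "idle". The compilable sketch below condenses that handshake, roughly following the 8727 variant; the register addresses, status mask and the mdio_read/mdio_write/udelay helpers are placeholders for the sketch, the destination-address write and the 1ms settling delay are omitted, while the poll budget (100 x 5us) and the minimum byte count of 2 come from the hunks.

#include <stdint.h>

typedef uint16_t u16;
typedef uint8_t u8;

#define TWO_WIRE_CTRL     0x8000 /* numeric values assumed for the sketch */
#define TWO_WIRE_BYTE_CNT 0x8001
#define TWO_WIRE_MEM_ADDR 0x8002
#define TWO_WIRE_DATA_BUF 0x8007
#define STATUS_MASK       0x000c
#define STATUS_COMPLETE   0x0004
#define STATUS_IDLE       0x0000

/* Trivial stand-ins for bnx2x_cl45_read/bnx2x_cl45_write and udelay. */
static u16 mdio[0x10000];
static u16 mdio_read(u16 reg)           { return mdio[reg]; }
static void mdio_write(u16 reg, u16 v)  { mdio[reg] = v; }
static void udelay(unsigned us)         { (void)us; }

static int read_sfp_eeprom(u16 addr, u8 byte_cnt, u8 *buf)
{
	u16 val = 0, i;

	(void)mdio_read(TWO_WIRE_CTRL);             /* clear stale status   */
	mdio_write(TWO_WIRE_BYTE_CNT,
		   byte_cnt < 2 ? 2 : byte_cnt);    /* hardware minimum: 2  */
	mdio_write(TWO_WIRE_MEM_ADDR, addr);
	mdio_write(TWO_WIRE_CTRL, 0x8002);          /* activate the command */

	/* Poll up to ~500us for command-complete status. */
	for (i = 0; i < 100; i++) {
		val = mdio_read(TWO_WIRE_CTRL);
		if ((val & STATUS_MASK) == STATUS_COMPLETE)
			break;
		udelay(5);
	}
	if ((val & STATUS_MASK) != STATUS_COMPLETE)
		return -1;                          /* driver uses -EINVAL  */

	for (i = 0; i < byte_cnt; i++)
		buf[i] = (u8)(mdio_read(TWO_WIRE_DATA_BUF + i) & 0xff);

	/* Wait for the engine to go idle before the next transaction. */
	for (i = 0; i < 100; i++) {
		val = mdio_read(TWO_WIRE_CTRL);
		if ((val & STATUS_MASK) == STATUS_IDLE)
			return 0;
		udelay(5);
	}
	return -1;
}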
bnx2x_verify_sfp_module(struct bnx2x_phy *phy, struct link_params *params) { @@ -4805,24 +4818,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, /* format the warning message */ if (bnx2x_read_sfp_module_eeprom(phy, params, - SFP_EEPROM_VENDOR_NAME_ADDR, - SFP_EEPROM_VENDOR_NAME_SIZE, - (u8 *)vendor_name)) + SFP_EEPROM_VENDOR_NAME_ADDR, + SFP_EEPROM_VENDOR_NAME_SIZE, + (u8 *)vendor_name)) vendor_name[0] = '\0'; else vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; if (bnx2x_read_sfp_module_eeprom(phy, params, - SFP_EEPROM_PART_NO_ADDR, - SFP_EEPROM_PART_NO_SIZE, - (u8 *)vendor_pn)) + SFP_EEPROM_PART_NO_ADDR, + SFP_EEPROM_PART_NO_SIZE, + (u8 *)vendor_pn)) vendor_pn[0] = '\0'; else vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; - netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," - " Port %d from %s part number %s\n", - params->port, vendor_name, vendor_pn); + netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected," + " Port %d from %s part number %s\n", + params->port, vendor_name, vendor_pn); phy->flags |= FLAGS_SFP_NOT_APPROVED; return -EINVAL; } @@ -4834,11 +4847,8 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, u8 val; struct bnx2x *bp = params->bp; u16 timeout; - /* - * Initialization time after hot-plug may take up to 300ms for - * some phys type ( e.g. JDSU ) - */ - + /* Initialization time after hot-plug may take up to 300ms for some + phys type ( e.g. JDSU ) */ for (timeout = 0; timeout < 60; timeout++) { if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) == 0) { @@ -4857,14 +4867,16 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, /* Make sure GPIOs are not using for LED mode */ u16 val; /* - * In the GPIO register, bit 4 is use to determine if the GPIOs are + * In the GPIO register, bit 4 is use to detemine if the GPIOs are * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for * output * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 * where the 1st bit is the over-current(only input), and 2nd bit is * for power( only output ) - * + */ + + /* * In case of NOC feature is disabled and power is up, set GPIO control * as input to enable listening of over-current indication */ @@ -4893,14 +4905,15 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, u16 cur_limiting_mode; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - &cur_limiting_mode); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &cur_limiting_mode); DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", cur_limiting_mode); if (edc_mode == EDC_MODE_LIMITING) { - DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n"); + DP(NETIF_MSG_LINK, + "Setting LIMITING MODE\n"); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, @@ -4909,63 +4922,62 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); - /* - * Changing to LRM mode takes quite few seconds. So do it only - * if current mode is limiting (default is LRM) - */ + /* Changing to LRM mode takes quite few seconds. 
+ So do it only if current mode is limiting + ( default is LRM )*/ if (cur_limiting_mode != EDC_MODE_LIMITING) return 0; bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_LRM_MODE, - 0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - 0x128); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + 0x128); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL0, - 0x4008); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL0, + 0x4008); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_LRM_MODE, - 0xaaaa); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0xaaaa); } return 0; } static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp, struct bnx2x_phy *phy, - u16 edc_mode) + u16 edc_mode) { u16 phy_identifier; u16 rom_ver2_val; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - &phy_identifier); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &phy_identifier); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - (phy_identifier & ~(1<<9))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier & ~(1<<9))); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - &rom_ver2_val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &rom_ver2_val); /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - (phy_identifier | (1<<9))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier | (1<<9))); return 0; } @@ -4978,11 +4990,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, switch (action) { case DISABLE_TX: - bnx2x_sfp_set_transmitter(params, phy, 0); + bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); break; case ENABLE_TX: if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) - bnx2x_sfp_set_transmitter(params, phy, 1); + bnx2x_sfp_set_transmitter(bp, phy, params->port, 1); break; default: DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", @@ -4991,38 +5003,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, } } -static void bnx2x_set_sfp_module_fault_led(struct link_params *params, - u8 gpio_mode) -{ - struct bnx2x *bp = params->bp; - - u32 fault_led_gpio = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].sfp_ctrl)) & - PORT_HW_CFG_FAULT_MODULE_LED_MASK; - switch (fault_led_gpio) { - case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED: - return; - case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0: - case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1: - case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2: - case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: - { - u8 gpio_port = bnx2x_get_gpio_port(params); - u16 gpio_pin = fault_led_gpio - - PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; - DP(NETIF_MSG_LINK, "Set fault module-detected led " - "pin %x port %x mode %x\n", - gpio_pin, gpio_port, gpio_mode); - bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); - } - break; - default: - DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n", - fault_led_gpio); - } -} - static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, struct link_params *params) { @@ -5040,14 +5020,15 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { DP(NETIF_MSG_LINK, 
"Failed to get valid module type\n"); return -EINVAL; - } else if (bnx2x_verify_sfp_module(phy, params) != 0) { + } else if (bnx2x_verify_sfp_module(phy, params) != + 0) { /* check SFP+ module compatibility */ DP(NETIF_MSG_LINK, "Module verification failed!!\n"); rc = -EINVAL; /* Turn on fault module-detected led */ - bnx2x_set_sfp_module_fault_led(params, - MISC_REGISTERS_GPIO_HIGH); - + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_HIGH, + params->port); if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { @@ -5058,17 +5039,18 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, } } else { /* Turn off fault module-detected led */ - bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); + DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n"); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_LOW, + params->port); } /* power up the SFP module */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) bnx2x_8727_power_module(bp, phy, 1); - /* - * Check and set limiting mode / LRM mode on 8726. On 8727 it - * is done automatically - */ + /* Check and set limiting mode / LRM mode on 8726. + On 8727 it is done automatically */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) bnx2x_8726_set_limiting_mode(bp, phy, edc_mode); else @@ -5080,9 +5062,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, if (rc == 0 || (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(params, phy, 1); + bnx2x_sfp_set_transmitter(bp, phy, params->port, 1); else - bnx2x_sfp_set_transmitter(params, phy, 0); + bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); return rc; } @@ -5095,9 +5077,11 @@ void bnx2x_handle_module_detect_int(struct link_params *params) u8 port = params->port; /* Set valid module led off */ - bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_HIGH, + params->port); - /* Get current gpio val reflecting module plugged in / out*/ + /* Get current gpio val refelecting module plugged in / out*/ gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); /* Call the handling function in case module is detected */ @@ -5113,20 +5097,18 @@ void bnx2x_handle_module_detect_int(struct link_params *params) DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); } else { u32 val = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port]. - config)); + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + config)); bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_INT_OUTPUT_SET, port); - /* - * Module was plugged out. - * Disable transmit for this module - */ + /* Module was plugged out. 
*/ + /* Disable transmit for this module */ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(params, phy, 0); + bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); } } @@ -5162,9 +5144,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" " link_status 0x%x\n", rx_sd, pcs_status, val2); - /* - * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status - * are set, or if the autoneg bit 1 is set + /* link is up if both bit 0 of pmd_rx_sd and + * bit 0 of pcs_status are set, or if the autoneg bit + * 1 is set */ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); if (link_up) { @@ -5185,15 +5167,14 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u32 tx_en_mode; - u16 cnt, val, tmp1; + u16 cnt, val; struct bnx2x *bp = params->bp; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); - bnx2x_wait_reset_complete(bp, phy, params); + bnx2x_wait_reset_complete(bp, phy); /* Wait until fw is loaded */ for (cnt = 0; cnt < 100; cnt++) { @@ -5260,26 +5241,6 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, 0x0004); } bnx2x_save_bcm_spirom_ver(bp, phy, params->port); - - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low - * power mode, if TX Laser is disabled - */ - - tx_en_mode = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].sfp_ctrl)) - & PORT_HW_CFG_TX_LASER_MASK; - - if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { - DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1); - tmp1 |= 0x1; - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); - } - return 0; } @@ -5314,26 +5275,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, /* Set soft reset */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0001); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); /* wait for 150ms for microcode load */ msleep(150); /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0000); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); msleep(200); bnx2x_save_bcm_spirom_ver(bp, phy, params->port); @@ -5368,18 +5329,23 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, u32 val; u32 swap_val, swap_override, aeu_gpio_mask, offset; DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 
1<<15); - bnx2x_wait_reset_complete(bp, phy, params); + bnx2x_wait_reset_complete(bp, phy); bnx2x_8726_external_rom_boot(phy, params); - /* - * Need to call module detected on initialization since the module - * detection triggered by actual module insertion might occur before - * driver is loaded, and when driver is loaded, it reset all - * registers, including the transmitter - */ + /* Need to call module detected on initialization since + the module detection triggered by actual module + insertion might occur before driver is loaded, and when + driver is loaded, it reset all registers, including the + transmitter */ bnx2x_sfp_module_detection(phy, params); if (phy->req_line_speed == SPEED_1000) { @@ -5412,10 +5378,8 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); - /* - * Enable RX-ALARM control to receive interrupt for 1G speed - * change - */ + /* Enable RX-ALARM control to receive + interrupt for 1G speed change */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4); bnx2x_cl45_write(bp, phy, @@ -5447,7 +5411,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, /* Set GPIO3 to trigger SFP+ module insertion/removal */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); + MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); /* The GPIO should be swapped if the swap register is set and active */ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); @@ -5538,7 +5502,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { u32 swap_val, swap_override; u8 port; - /* + /** * The PHY reset is controlled by GPIO 1. Fake the port number * to cancel the swap done in set_gpio() */ @@ -5547,21 +5511,20 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); port = (swap_val && swap_override) ^ 1; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); } static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u32 tx_en_mode; - u16 tmp1, val, mod_abs, tmp2; + u16 tmp1, val, mod_abs; u16 rx_alarm_ctrl_val; u16 lasi_ctrl_val; struct bnx2x *bp = params->bp; /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ - bnx2x_wait_reset_complete(bp, phy, params); + bnx2x_wait_reset_complete(bp, phy); rx_alarm_ctrl_val = (1<<2) | (1<<5) ; lasi_ctrl_val = 0x0004; @@ -5574,17 +5537,14 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val); - /* - * Initially configure MOD_ABS to interrupt when module is - * presence( bit 8) - */ + /* Initially configure MOD_ABS to interrupt when + module is presence( bit 8) */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); - /* - * Set EDC off by setting OPTXLOS signal input to low (bit 9). - * When the EDC is off it locks onto a reference clock and avoids - * becoming 'lost' - */ + /* Set EDC off by setting OPTXLOS signal input to low + (bit 9). 
+ When the EDC is off it locks onto a reference clock and + avoids becoming 'lost'.*/ mod_abs &= ~(1<<8); if (!(phy->flags & FLAGS_NOC)) mod_abs &= ~(1<<9); @@ -5599,7 +5559,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, if (phy->flags & FLAGS_NOC) val |= (3<<5); - /* + /** * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 * status which reflect SFP+ module over-current */ @@ -5626,7 +5586,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); - /* + /** * Power down the XAUI until link is up in case of dual-media * and 1G */ @@ -5652,7 +5612,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); } else { - /* + /** * Since the 8727 has only single reset pin, need to set the 10G * registers although it is default */ @@ -5668,8 +5628,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, 0x0008); } - /* - * Set 2-wire transfer rate of SFP+ module EEPROM + /* Set 2-wire transfer rate of SFP+ module EEPROM * to 100Khz since some DACs(direct attached cables) do * not work at 400Khz. */ @@ -5692,26 +5651,6 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, phy->tx_preemphasis[1]); } - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low - * power mode, if TX Laser is disabled - */ - tx_en_mode = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].sfp_ctrl)) - & PORT_HW_CFG_TX_LASER_MASK; - - if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { - - DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2); - tmp2 |= 0x1000; - tmp2 &= 0xFFEF; - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); - } - return 0; } @@ -5725,49 +5664,46 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, port_feature_config[params->port]. config)); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); if (mod_abs & (1<<8)) { /* Module is absent */ DP(NETIF_MSG_LINK, "MOD_ABS indication " "show module is absent\n"); - /* - * 1. Set mod_abs to detect next module - * presence event - * 2. Set EDC off by setting OPTXLOS signal input to low - * (bit 9). - * When the EDC is off it locks onto a reference clock and - * avoids becoming 'lost'. - */ + /* 1. Set mod_abs to detect next module + presence event + 2. Set EDC off by setting OPTXLOS signal input to low + (bit 9). + When the EDC is off it locks onto a reference clock and + avoids becoming 'lost'.*/ mod_abs &= ~(1<<8); if (!(phy->flags & FLAGS_NOC)) mod_abs &= ~(1<<9); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* - * Clear RX alarm since it stays up as long as - * the mod_abs wasn't changed - */ + /* Clear RX alarm since it stays up as long as + the mod_abs wasn't changed */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); } else { /* Module is present */ DP(NETIF_MSG_LINK, "MOD_ABS indication " "show module is present\n"); - /* - * First disable transmitter, and if the module is ok, the - * module_detection will enable it - * 1. 
Set mod_abs to detect next module absent event ( bit 8) - * 2. Restore the default polarity of the OPRXLOS signal and - * this signal will then correctly indicate the presence or - * absence of the Rx signal. (bit 9) - */ + /* First thing, disable transmitter, + and if the module is ok, the + module_detection will enable it*/ + + /* 1. Set mod_abs to detect next module + absent event ( bit 8) + 2. Restore the default polarity of the OPRXLOS signal and + this signal will then correctly indicate the presence or + absence of the Rx signal. (bit 9) */ mod_abs |= (1<<8); if (!(phy->flags & FLAGS_NOC)) mod_abs |= (1<<9); @@ -5775,12 +5711,10 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* - * Clear RX alarm since it stays up as long as the mod_abs - * wasn't changed. This is need to be done before calling the - * module detection, otherwise it will clear* the link update - * alarm - */ + /* Clear RX alarm since it stays up as long as + the mod_abs wasn't changed. This is need to be done + before calling the module detection, otherwise it will clear + the link update alarm */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); @@ -5788,7 +5722,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(params, phy, 0); + bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) bnx2x_sfp_module_detection(phy, params); @@ -5797,8 +5731,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, } DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", - rx_alarm_status); - /* No need to check link status in case of module plugged in/out */ + rx_alarm_status); + /* No need to check link status in case of + module plugged in/out */ } static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, @@ -5834,7 +5769,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); - /* + /** * If a module is present and there is need to check * for over current */ @@ -5854,8 +5789,12 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, " Please remove the SFP+ module and" " restart the system to clear this" " error.\n", - params->port); - /* Disable all RX_ALARMs except for mod_abs */ + params->port); + + /* + * Disable all RX_ALARMs except for + * mod_abs + */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5)); @@ -5898,15 +5837,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); - /* - * Bits 0..2 --> speed detected, - * Bits 13..15--> link is down - */ + /* Bits 0..2 --> speed detected, + bits 13..15--> link is down */ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { link_up = 1; vars->line_speed = SPEED_10000; - DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", - params->port); } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { link_up = 1; vars->line_speed = SPEED_1000; @@ -5928,7 +5863,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_GP, &val1); - /* + /** * In case of dual-media board and 1G, power up the XAUI side, * otherwise power it down. 
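The MOD_ABS handler above arms the next insertion/removal event by toggling bits 8 and 9 of the PHY-identifier register and clears the latched RX alarm before module detection runs. A reduced sketch of that branch logic follows; the register addresses and helper names are placeholders for the sketch, and the FLAGS_NOC special case and the "wait for module initialized" step are left out.

#include <stdint.h>
#include <stdbool.h>

typedef uint16_t u16;

#define PHY_IDENTIFIER 0x0002 /* numeric addresses assumed for the sketch */
#define RX_ALARM       0x9003

static u16 regs[0x10000];
static u16 rd(u16 reg)          { return regs[reg]; }
static void wr(u16 reg, u16 v)  { regs[reg] = v; }

static void sfp_set_transmitter(bool en) { (void)en; }
static void run_module_detection(void)   { }

static void handle_mod_abs(bool enforce_tx_laser_off)
{
	u16 mod_abs = rd(PHY_IDENTIFIER);

	if (mod_abs & (1 << 8)) {
		/* Module absent: clear bit 8 so the next "present" event is
		 * detected, and hold OPTXLOS low (clear bit 9) so the EDC
		 * stays locked to its reference clock. */
		mod_abs &= ~((1 << 8) | (1 << 9));
		wr(PHY_IDENTIFIER, mod_abs);

		/* RX alarm stays latched until mod_abs changes; reading it
		 * here clears it. */
		(void)rd(RX_ALARM);
	} else {
		/* Module present: set bit 8 to detect the next "absent" event
		 * and restore the OPRXLOS polarity (bit 9) so Rx
		 * loss-of-signal is reported correctly again. */
		mod_abs |= (1 << 8) | (1 << 9);
		wr(PHY_IDENTIFIER, mod_abs);

		/* Clear the latched RX alarm before module detection so it
		 * does not wipe the link-update alarm later. */
		(void)rd(RX_ALARM);

		/* Keep the laser off for now; module detection re-enables it
		 * if the module is approved. */
		if (enforce_tx_laser_off)
			sfp_set_transmitter(false);
		run_module_detection();
	}
}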
For 10G it is done automatically */ @@ -5948,7 +5883,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; /* Disable Transmitter */ - bnx2x_sfp_set_transmitter(params, phy, 0); + bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); /* Clear LASI */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0); @@ -5960,23 +5895,19 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct link_params *params) { - u16 val, fw_ver1, fw_ver2, cnt, adj; + u16 val, fw_ver1, fw_ver2, cnt; struct bnx2x *bp = params->bp; - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = -1; - /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); if (val & 1) break; udelay(5); @@ -5990,11 +5921,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); if (val & 1) break; udelay(5); @@ -6007,9 +5938,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, } /* lower 16 bits of the register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); /* upper 16 bits of register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1, phy->ver_addr); @@ -6018,53 +5949,49 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val, adj; - - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = -1; + u16 val; /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val); + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); val &= 0xFE00; val |= 0x0092; bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val); + MDIO_PMA_REG_8481_LINK_SIGNAL, val); bnx2x_cl45_write(bp, phy, 
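bnx2x_save_848xx_spirom_version() in the hunk above reaches 32-bit firmware registers through the MDIO2ARM window: four address/data words at 0xA819-0xA81C, a command word at 0xA817, and a completion poll on bit 0 of 0xA818. The standalone sketch below walks the same sequence; the MDIO array and udelay stub exist only to make it compile, and reading the 0x0009/0x000A command words as "write" and "read" is an interpretation of the hunk, not a documented fact.

#include <stdint.h>

typedef uint16_t u16;
typedef uint32_t u32;

static u16 mdio2[0x10000];
static u16 rd2(u16 reg)          { return mdio2[reg]; }
static void wr2(u16 reg, u16 v)  { mdio2[reg] = v; }
static void udelay2(unsigned us) { (void)us; }

/* Poll the MDIO2ARM status register (0xA818) until bit 0 reports completion;
 * the hunk gives up after 100 x 5us. */
static int mdio2arm_wait(void)
{
	for (unsigned cnt = 0; cnt < 100; cnt++) {
		if (rd2(0xA818) & 1)
			return 0;
		udelay2(5);
	}
	return -1;
}

static int read_spirom_version(u32 *fw_ver)
{
	/* (1) set register 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000 */
	wr2(0xA819, 0x0014);        /* target address, low 16 bits   */
	wr2(0xA81A, 0xc200);        /* target address, high 16 bits  */
	wr2(0xA81B, 0x0000);        /* data, low 16 bits             */
	wr2(0xA81C, 0x0300);        /* data, high 16 bits            */
	wr2(0xA817, 0x0009);        /* command word from the hunk    */
	if (mdio2arm_wait())
		return -1;

	/* (2) read back 0xc200_0000 (SPI_FW_STATUS) */
	wr2(0xA819, 0x0000);
	wr2(0xA81A, 0xc200);
	wr2(0xA817, 0x000A);        /* command word from the hunk    */
	if (mdio2arm_wait())
		return -1;

	/* Firmware version: upper half from 0xA81C, lower half from 0xA81B,
	 * combined the same way the hunk feeds bnx2x_save_spirom_version(). */
	*fw_ver = ((u32)rd2(0xA81C) << 16) | rd2(0xA81B);
	return 0;
}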
MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK + adj, + MDIO_PMA_REG_8481_LED1_MASK, 0x80); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK + adj, + MDIO_PMA_REG_8481_LED2_MASK, 0x18); /* Select activity source by Tx and Rx, as suggested by PHY AE */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK + adj, + MDIO_PMA_REG_8481_LED3_MASK, 0x0006); /* Select the closest activity blink rate to that in 10/100/1000 */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_BLINK + adj, + MDIO_PMA_REG_8481_LED3_BLINK, 0); bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val); + MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val); + MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); /* 'Interrupt Mask' */ bnx2x_cl45_write(bp, phy, @@ -6078,11 +6005,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 autoneg_val, an_1000_val, an_10_100_val; - /* - * This phy uses the NIG latch mechanism since link indication - * arrives through its LED4 and not via its LASI signal, so we - * get steady signal instead of clear on read - */ + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 1 << NIG_LATCH_BC_ENABLE_MI_INT); @@ -6207,11 +6130,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_wait_reset_complete(bp, phy, params); + bnx2x_wait_reset_complete(bp, phy); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); return bnx2x_848xx_cmn_config_init(phy, params, vars); @@ -6223,15 +6146,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u8 port, initialize = 1; - u16 val, adj; + u16 val; u16 temp; - u32 actual_phy_selection, cms_enable; + u32 actual_phy_selection; u8 rc = 0; /* This is just for MDIO_CTL_REG_84823_MEDIA register. 
*/ - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = 3; msleep(1); if (CHIP_IS_E2(bp)) @@ -6241,12 +6161,11 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - bnx2x_wait_reset_complete(bp, phy, params); + bnx2x_wait_reset_complete(bp, phy); /* Wait for GPHY to come out of reset */ msleep(50); - /* - * BCM84823 requires that XGXS links up first @ 10G for normal behavior - */ + /* BCM84823 requires that XGXS links up first @ 10G for normal + behavior */ temp = vars->line_speed; vars->line_speed = SPEED_10000; bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); @@ -6256,7 +6175,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Set dual-media configuration according to configuration */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA + adj, &val); + MDIO_CTL_REG_84823_MEDIA, &val); val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | MDIO_CTL_REG_84823_MEDIA_LINE_MASK | MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | @@ -6289,7 +6208,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA + adj, val); + MDIO_CTL_REG_84823_MEDIA, val); DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); @@ -6297,43 +6216,23 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, rc = bnx2x_848xx_cmn_config_init(phy, params, vars); else bnx2x_save_848xx_spirom_version(phy, params); - cms_enable = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].default_cfg)) & - PORT_HW_CFG_ENABLE_CMS_MASK; - - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_USER_CTRL_REG, &val); - if (cms_enable) - val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; - else - val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_USER_CTRL_REG, val); - - return rc; } static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 val, val1, val2, adj; + u16 val, val1, val2; u8 link_up = 0; - /* Reg offset adjustment for 84833 */ - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = -1; - /* Check 10G-BaseT link status */ /* Check PMD signal ok */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 0xFFFA, &val1); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, &val2); DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); @@ -6418,9 +6317,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); + MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); + MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); } static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, @@ -6442,8 +6341,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, else port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); } static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, @@ -6498,24 +6397,24 @@ static void 
bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, /* Set LED masks */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x20); } else { bnx2x_cl45_write(bp, phy, @@ -6539,35 +6438,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, val |= 0x2492; bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); /* Set LED masks */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x20); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x20); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); } else { bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x20); } break; @@ -6585,9 +6484,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, &val); if (!((val & - MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) - >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) { - DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n"); + MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) + >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){ + DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n"); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, @@ -6596,42 +6495,30 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, /* Set LED masks */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x10); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x10); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x80); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x80); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x98); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x98); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x40); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x40); } else { bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x80); - - /* Tell LED3 to blink on source */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - &val); - val &= ~(7<<6); - val |= (1<<6); /* A83B[8:6]= 1 */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - val); } break; } @@ -6658,10 +6545,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy, /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_wait_reset_complete(bp, phy, params); + 
bnx2x_wait_reset_complete(bp, phy); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1); @@ -6708,7 +6595,9 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", val2, val1); link_up = ((val1 & 4) == 4); - /* if link is up print the AN outcome of the SFX7101 PHY */ + /* if link is up + * print the AN outcome of the SFX7101 PHY + */ if (link_up) { bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, @@ -6742,20 +6631,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) u16 val, cnt; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); for (cnt = 0; cnt < 10; cnt++) { msleep(50); /* Writes a self-clearing reset */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, - (val | (1<<15))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, + (val | (1<<15))); /* Wait for clear */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); if ((val & (1<<15)) == 0) break; @@ -6766,10 +6655,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { /* Low power mode is controlled by GPIO 2 */ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); /* The PHY reset is controlled by GPIO 1 */ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); } static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, @@ -6811,9 +6700,9 @@ static struct bnx2x_phy phy_null = { .supported = 0, .media_type = ETH_PHY_NOT_PRESENT, .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)NULL, @@ -6848,8 +6737,8 @@ static struct bnx2x_phy phy_serdes = { .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_init_serdes, @@ -6885,8 +6774,8 @@ static struct bnx2x_phy phy_xgxs = { .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_init_xgxs, @@ -6916,8 +6805,8 @@ static struct bnx2x_phy phy_7101 = { .media_type = ETH_PHY_BASE_T, .ver_addr = 0, .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_7101_config_init, @@ -6947,9 +6836,9 @@ static struct bnx2x_phy phy_8073 = { SUPPORTED_Asym_Pause), .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_8073_config_init, @@ -7158,43 +7047,6 @@ static struct bnx2x_phy phy_84823 = { .phy_specific_func = (phy_specific_func_t)NULL }; -static struct bnx2x_phy phy_84833 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, - .addr = 0xff, - .flags = FLAGS_FAN_FAILURE_DET_REQ | - 
FLAGS_REARM_LATCH_SIGNAL, - .def_md_devad = 0, - .reserved = 0, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_BASE_T, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL -}; - /*****************************************************************/ /* */ /* Populate the phy according. Main function: bnx2x_populate_phy */ @@ -7208,7 +7060,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, /* Get the 4 lanes xgxs config rx and tx */ u32 rx = 0, tx = 0, i; for (i = 0; i < 2; i++) { - /* + /** * INT_PHY and EXT_PHY1 share the same value location in the * shmem. When num_phys is greater than 1, than this value * applies only to EXT_PHY1 @@ -7216,19 +7068,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, if (phy_index == INT_PHY || phy_index == EXT_PHY1) { rx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); + dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); tx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); + dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); } else { rx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); + dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); tx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); + dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); } phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff); @@ -7348,9 +7200,6 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: *phy = phy_84823; break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: - *phy = phy_84833; - break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: *phy = phy_7101; break; @@ -7365,21 +7214,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); - /* - * The shmem address of the phy version is located on different - * structures. In case this structure is too old, do not set - * the address - */ + /** + * The shmem address of the phy version is located on different + * structures. 
In case this structure is too old, do not set + * the address + */ config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, dev_info.shared_hw_config.config2)); if (phy_index == EXT_PHY1) { phy->ver_addr = shmem_base + offsetof(struct shmem_region, port_mb[port].ext_phy_fw_version); - /* Check specific mdc mdio settings */ - if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) - mdc_mdio_access = config2 & - SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) + mdc_mdio_access = config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; } else { u32 size = REG_RD(bp, shmem2_base); @@ -7398,7 +7247,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, } phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); - /* + /** * In case mdc/mdio_access of the external phy is different than the * mdc/mdio access of the XGXS, a HW lock must be taken in each access * to prevent one port interfere with another port's CL45 operations. @@ -7433,20 +7282,18 @@ static void bnx2x_phy_def_cfg(struct link_params *params, /* Populate the default phy configuration for MF mode */ if (phy_index == EXT_PHY2) { link_config = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. + offsetof(struct shmem_region, dev_info. port_feature_config[params->port].link_config2)); phy->speed_cap_mask = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info. + offsetof(struct shmem_region, dev_info. port_hw_config[params->port].speed_capability_mask2)); } else { link_config = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. + offsetof(struct shmem_region, dev_info. port_feature_config[params->port].link_config)); phy->speed_cap_mask = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info. - port_hw_config[params->port].speed_capability_mask)); + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].speed_capability_mask)); } DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask" " 0x%x\n", phy_index, link_config, phy->speed_cap_mask); @@ -7593,7 +7440,7 @@ static void set_phy_vars(struct link_params *params) else if (phy_index == EXT_PHY2) actual_phy_idx = EXT_PHY1; } - params->phy[actual_phy_idx].req_flow_ctrl = + params->phy[actual_phy_idx].req_flow_ctrl = params->req_flow_ctrl[link_cfg_idx]; params->phy[actual_phy_idx].req_line_speed = @@ -7646,6 +7493,57 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) set_phy_vars(params); DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); + if (CHIP_REV_IS_FPGA(bp)) { + + vars->link_up = 1; + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); + /* enable on E1.5 FPGA */ + if (CHIP_IS_E1H(bp)) { + vars->flow_ctrl |= + (BNX2X_FLOW_CTRL_TX | + BNX2X_FLOW_CTRL_RX); + vars->link_status |= + (LINK_STATUS_TX_FLOW_CONTROL_ENABLED | + LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + } + + bnx2x_emac_enable(params, vars, 0); + if (!(CHIP_IS_E2(bp))) + bnx2x_pbf_update(params, vars->flow_ctrl, + vars->line_speed); + /* disable drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + + /* update shared memory */ + bnx2x_update_mng(params, vars->link_status); + + return 0; + + } else + if (CHIP_REV_IS_EMUL(bp)) { + + vars->link_up = 1; + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); + + bnx2x_bmac_enable(params, vars, 0); + + bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); + /* Disable drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + + params->port*4, 0); + + /* update shared memory */ + bnx2x_update_mng(params, vars->link_status); + + return 0; + + } else if (params->loopback_mode == LOOPBACK_BMAC) { vars->link_up = 1; @@ -7661,7 +7559,8 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) /* set bmac loopback */ bnx2x_bmac_enable(params, vars, 1); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + + params->port*4, 0); } else if (params->loopback_mode == LOOPBACK_EMAC) { @@ -7677,7 +7576,8 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) /* set bmac loopback */ bnx2x_emac_enable(params, vars, 1); bnx2x_emac_program(params, vars); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + + params->port*4, 0); } else if ((params->loopback_mode == LOOPBACK_XGXS) || (params->loopback_mode == LOOPBACK_EXT_PHY)) { @@ -7700,7 +7600,8 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) bnx2x_emac_program(params, vars); bnx2x_emac_enable(params, vars, 0); } else - bnx2x_bmac_enable(params, vars, 0); + bnx2x_bmac_enable(params, vars, 0); + if (params->loopback_mode == LOOPBACK_XGXS) { /* set 10G XGXS loopback */ params->phy[INT_PHY].config_loopback( @@ -7718,7 +7619,9 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) params); } } - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + + params->port*4, 0); bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); @@ -7737,7 +7640,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) return 0; } u8 bnx2x_link_reset(struct 
link_params *params, struct link_vars *vars, - u8 reset_ext_phy) + u8 reset_ext_phy) { struct bnx2x *bp = params->bp; u8 phy_index, port = params->port, clear_latch_ind = 0; @@ -7746,10 +7649,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, vars->link_status = 0; bnx2x_update_mng(params, vars->link_status); bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); /* activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); @@ -7817,13 +7720,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, struct bnx2x_phy phy[PORT_MAX]; struct bnx2x_phy *phy_blk[PORT_MAX]; u16 val; - s8 port = 0; + s8 port; s8 port_of_path = 0; - u32 swap_val, swap_override; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - port ^= (swap_val && swap_override); - bnx2x_ext_phy_hw_reset(bp, port); + + bnx2x_ext_phy_hw_reset(bp, 0); /* PART1 - Reset both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { u32 shmem_base, shmem2_base; @@ -7848,22 +7748,21 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, /* disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port_of_path*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); /* Need to take the phy out of low power mode in order to write to access its registers */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); /* Reset the phy */ bnx2x_cl45_write(bp, &phy[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, - 1<<15); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1<<15); } /* Add delay of 150ms after reset */ @@ -7892,20 +7791,18 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, /* Only set bit 10 = 1 (Tx power down) */ bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); /* Phase1 of TX_POWER_DOWN reset */ bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, - (val | 1<<10)); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, + (val | 1<<10)); } - /* - * Toggle Transmitter: Power down and then up with 600ms delay - * between - */ + /* Toggle Transmitter: Power down and then up with 600ms + delay between */ msleep(600); /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ @@ -7913,25 +7810,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, /* Phase2 of POWER_DOWN_RESET */ /* Release bit 10 (Release Tx power down) */ bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); msleep(15); /* Read modify write the SPI-ROM version select register */ bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, &val); bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, 
(val | (1<<12))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); /* set GPIO2 back to LOW */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); } return 0; } @@ -7978,90 +7875,32 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, /* Set fault module detected LED on */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, - port); + MISC_REGISTERS_GPIO_HIGH, + port); } return 0; } -static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base, - u8 *io_gpio, u8 *io_port) -{ - - u32 phy_gpio_reset = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[PORT_0].default_cfg)); - switch (phy_gpio_reset) { - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0: - *io_gpio = 0; - *io_port = 0; - break; - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0: - *io_gpio = 1; - *io_port = 0; - break; - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0: - *io_gpio = 2; - *io_port = 0; - break; - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0: - *io_gpio = 3; - *io_port = 0; - break; - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1: - *io_gpio = 0; - *io_port = 1; - break; - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1: - *io_gpio = 1; - *io_port = 1; - break; - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1: - *io_gpio = 2; - *io_port = 1; - break; - case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1: - *io_gpio = 3; - *io_port = 1; - break; - default: - /* Don't override the io_gpio and io_port */ - break; - } -} static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], u32 shmem2_base_path[], u8 phy_index, u32 chip_id) { - s8 port, reset_gpio; + s8 port; u32 swap_val, swap_override; struct bnx2x_phy phy[PORT_MAX]; struct bnx2x_phy *phy_blk[PORT_MAX]; s8 port_of_path; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - reset_gpio = MISC_REGISTERS_GPIO_1; port = 1; - /* - * Retrieve the reset gpio/port which control the reset. 
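(An aside, not part of the diff itself: both the removed and the restored 8727 init above fold the NIG port-swap straps into the port index with "port ^= (swap_val && swap_override)". The logical && collapses the two register reads to 0 or 1, so the XOR flips the port only when the swap strap and its override are both non-zero. A tiny standalone illustration; the helper name is invented for the demo.)

#include <stdint.h>
#include <stdio.h>

/* Flip the port index only when both straps are non-zero. */
static int effective_port(int port, uint32_t swap_val, uint32_t swap_override)
{
        return port ^ (swap_val && swap_override);
}

int main(void)
{
        printf("%d\n", effective_port(1, 0, 0)); /* 1: no swap strap set */
        printf("%d\n", effective_port(1, 1, 0)); /* 1: override not set  */
        printf("%d\n", effective_port(1, 1, 1)); /* 0: swap takes effect */
        return 0;
}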
- * Default is GPIO1, PORT1 - */ - bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], - (u8 *)&reset_gpio, (u8 *)&port); + bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override)); /* Calculate the port based on port swap */ port ^= (swap_val && swap_override); - /* Initiate PHY reset*/ - bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); - msleep(1); - bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, - port); - msleep(5); /* PART1 - Reset both phys */ @@ -8097,7 +7936,9 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, /* Reset the phy */ bnx2x_cl45_write(bp, &phy[port], - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1<<15); } /* Add delay of 150ms after reset */ @@ -8111,7 +7952,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, } /* PART2 - Download firmware to both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { - if (CHIP_IS_E2(bp)) + if (CHIP_IS_E2(bp)) port_of_path = 0; else port_of_path = port; @@ -8146,10 +7987,8 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - /* - * GPIO1 affects both ports, so there's need to pull - * it for single port alone - */ + /* GPIO1 affects both ports, so there's need to pull + it for single port alone */ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, shmem2_base_path, phy_index, chip_id); @@ -8159,15 +7998,11 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; default: DP(NETIF_MSG_LINK, - "ext_phy 0x%x common init not required\n", - ext_phy_type); + "bnx2x_common_init_phy: ext_phy 0x%x not required\n", + ext_phy_type); break; } - if (rc != 0) - netdev_err(bp->dev, "Warning: PHY was not initialized," - " Port %d\n", - 0); return rc; } @@ -8180,6 +8015,9 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], u32 ext_phy_type, ext_phy_config; DP(NETIF_MSG_LINK, "Begin common phy init\n"); + if (CHIP_REV_IS_EMUL(bp)) + return 0; + /* Check if common init was already done */ phy_ver = REG_RD(bp, shmem_base_path[0] + offsetof(struct shmem_region, diff --git a/trunk/drivers/net/bnx2x/bnx2x_link.h b/trunk/drivers/net/bnx2x/bnx2x_link.h index 92f36b6950dc..bedab1a942c4 100644 --- a/trunk/drivers/net/bnx2x/bnx2x_link.h +++ b/trunk/drivers/net/bnx2x/bnx2x_link.h @@ -1,4 +1,4 @@ -/* Copyright 2008-2011 Broadcom Corporation +/* Copyright 2008-2010 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -33,7 +33,7 @@ #define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH #define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE -#define SPEED_AUTO_NEG 0 +#define SPEED_AUTO_NEG 0 #define SPEED_12000 12000 #define SPEED_12500 12500 #define SPEED_13000 13000 @@ -44,8 +44,8 @@ #define SFP_EEPROM_VENDOR_NAME_SIZE 16 #define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 #define SFP_EEPROM_VENDOR_OUI_SIZE 3 -#define SFP_EEPROM_PART_NO_ADDR 0x28 -#define SFP_EEPROM_PART_NO_SIZE 16 +#define SFP_EEPROM_PART_NO_ADDR 0x28 +#define SFP_EEPROM_PART_NO_SIZE 16 #define PWR_FLT_ERR_MSG_LEN 250 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \ @@ -62,7 +62,7 @@ #define SINGLE_MEDIA(params) (params->num_phys == 2) /* Dual Media board contains two external phy with different media */ #define DUAL_MEDIA(params) (params->num_phys == 3) -#define FW_PARAM_MDIO_CTRL_OFFSET 16 +#define FW_PARAM_MDIO_CTRL_OFFSET 16 #define 
FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) @@ -201,14 +201,12 @@ struct link_params { /* Default / User Configuration */ u8 loopback_mode; -#define LOOPBACK_NONE 0 -#define LOOPBACK_EMAC 1 -#define LOOPBACK_BMAC 2 +#define LOOPBACK_NONE 0 +#define LOOPBACK_EMAC 1 +#define LOOPBACK_BMAC 2 #define LOOPBACK_XGXS 3 #define LOOPBACK_EXT_PHY 4 -#define LOOPBACK_EXT 5 -#define LOOPBACK_UMAC 6 -#define LOOPBACK_XMAC 7 +#define LOOPBACK_EXT 5 /* Device parameters */ u8 mac_addr[6]; @@ -232,11 +230,10 @@ struct link_params { /* Phy register parameter */ u32 chip_id; - /* features */ u32 feature_config_flags; -#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) -#define FEATURE_CONFIG_PFC_ENABLED (1<<1) -#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) +#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) +#define FEATURE_CONFIG_PFC_ENABLED (1<<1) +#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) /* Will be populated during common init */ struct bnx2x_phy phy[MAX_PHYS]; @@ -337,11 +334,6 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); /* Reset the external of SFX7101 */ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); -/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ -u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf); - void bnx2x_hw_reset_phy(struct link_params *params); /* Checks if HW lock is required for this phy/board type */ @@ -387,7 +379,7 @@ void bnx2x_ets_disabled(struct link_params *params); /* Used to configure the ETS to BW limited */ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, - const u32 cos1_bw); + const u32 cos1_bw); /* Used to configure the ETS to strict */ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); diff --git a/trunk/drivers/net/bnx2x/bnx2x_main.c b/trunk/drivers/net/bnx2x/bnx2x_main.c index 2215a39f74fb..8cdcf5b39d1e 100644 --- a/trunk/drivers/net/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/bnx2x/bnx2x_main.c @@ -5296,6 +5296,10 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) } } + bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, + bp->common.shmem_base, + bp->common.shmem2_base); + bnx2x_setup_fan_failure_detection(bp); /* clear PXP2 attentions */ @@ -5499,6 +5503,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, MCP_BLOCK, init_stage); bnx2x_init_block(bp, DMAE_BLOCK, init_stage); + bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, + bp->common.shmem_base, + bp->common.shmem2_base); if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, bp->common.shmem2_base, port)) { u32 reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : @@ -8372,17 +8379,6 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) bp->mdio.prtad = XGXS_EXT_PHY_ADDR(ext_phy_config); - - /* - * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) - * In MF mode, it is set to cover self test cases - */ - if (IS_MF(bp)) - bp->port.need_hw_lock = 1; - else - bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, - bp->common.shmem_base, - bp->common.shmem2_base); } static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) @@ -9866,8 +9862,7 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) int rc = 0; mutex_lock(&bp->cnic_mutex); - c_ops = rcu_dereference_protected(bp->cnic_ops, - lockdep_is_held(&bp->cnic_mutex)); + c_ops = bp->cnic_ops; if (c_ops) rc = c_ops->cnic_ctl(bp->cnic_data, ctl); mutex_unlock(&bp->cnic_mutex); diff --git a/trunk/drivers/net/bnx2x/bnx2x_reg.h b/trunk/drivers/net/bnx2x/bnx2x_reg.h index 1c89f19a4425..e01330bb36c7 100644 --- a/trunk/drivers/net/bnx2x/bnx2x_reg.h +++ b/trunk/drivers/net/bnx2x/bnx2x_reg.h @@ -6083,7 +6083,6 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 #define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e #define MDIO_PMA_REG_8727_PCS_GP 0xc842 -#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4 #define MDIO_AN_REG_8727_MISC_CTRL 0x8309 diff --git a/trunk/drivers/net/bonding/bond_3ad.c b/trunk/drivers/net/bonding/bond_3ad.c index 1024ae158227..171782e2bb39 100644 --- a/trunk/drivers/net/bonding/bond_3ad.c +++ b/trunk/drivers/net/bonding/bond_3ad.c @@ -2470,10 +2470,6 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac if (!(dev->flags & IFF_MASTER)) goto out; - skb = skb_share_check(skb, GFP_ATOMIC); - if (!skb) - goto out; - if (!pskb_may_pull(skb, sizeof(struct lacpdu))) goto out; diff --git a/trunk/drivers/net/bonding/bond_alb.c b/trunk/drivers/net/bonding/bond_alb.c index 5c6fba802f2b..f4e638c65129 100644 --- a/trunk/drivers/net/bonding/bond_alb.c +++ b/trunk/drivers/net/bonding/bond_alb.c @@ -326,10 +326,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct goto out; } - skb = skb_share_check(skb, GFP_ATOMIC); - if (!skb) - goto out; - if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) goto out; diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 1df9f0ea9184..b1025b85acf1 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -1372,8 +1372,8 @@ static int bond_compute_features(struct bonding *bond) { struct slave *slave; struct net_device *bond_dev = bond->dev; - u32 features = bond_dev->features; - u32 vlan_features = 0; + unsigned long features = bond_dev->features; + unsigned long vlan_features = 0; unsigned short max_hard_header_len = max((u16)ETH_HLEN, bond_dev->hard_header_len); int i; @@ -1400,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond) done: features |= (bond_dev->features & BOND_VLAN_FEATURES); - bond_dev->features = netdev_fix_features(bond_dev, features); - bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features); + bond_dev->features = netdev_fix_features(features, NULL); + bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); bond_dev->hard_header_len = max_hard_header_len; return 0; @@ -2733,10 +2733,6 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack if (!slave || 
!slave_do_arp_validate(bond, slave)) goto out_unlock; - skb = skb_share_check(skb, GFP_ATOMIC); - if (!skb) - goto out_unlock; - if (!pskb_may_pull(skb, arp_hdr_len(dev))) goto out_unlock; diff --git a/trunk/drivers/net/bonding/bond_sysfs.c b/trunk/drivers/net/bonding/bond_sysfs.c index 72bb0f6cc9bf..8fd0174c5380 100644 --- a/trunk/drivers/net/bonding/bond_sysfs.c +++ b/trunk/drivers/net/bonding/bond_sysfs.c @@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d, bond->dev->name, new_value); } out: - return ret; + return count; } static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier); @@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, } } out: - return ret; + return count; } static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, bonding_show_slaves_active, bonding_store_slaves_active); diff --git a/trunk/drivers/net/can/Kconfig b/trunk/drivers/net/can/Kconfig index 5dec456fd4a4..d5a9db60ade9 100644 --- a/trunk/drivers/net/can/Kconfig +++ b/trunk/drivers/net/can/Kconfig @@ -23,7 +23,7 @@ config CAN_SLCAN As only the sending and receiving of CAN frames is implemented, this driver should work with the (serial/USB) CAN hardware from: - www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de + www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de Userspace tools to attach the SLCAN line discipline (slcan_attach, slcand) can be found in the can-utils at the SocketCAN SVN, see @@ -117,8 +117,6 @@ source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/usb/Kconfig" -source "drivers/net/can/softing/Kconfig" - config CAN_DEBUG_DEVICES bool "CAN devices debugging messages" depends on CAN diff --git a/trunk/drivers/net/can/Makefile b/trunk/drivers/net/can/Makefile index 53c82a71778e..07ca159ba3f9 100644 --- a/trunk/drivers/net/can/Makefile +++ b/trunk/drivers/net/can/Makefile @@ -9,7 +9,6 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o can-dev-y := dev.o obj-y += usb/ -obj-y += softing/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_MSCAN) += mscan/ diff --git a/trunk/drivers/net/can/at91_can.c b/trunk/drivers/net/can/at91_can.c index 2532b9631538..7ef83d06f7ed 100644 --- a/trunk/drivers/net/can/at91_can.c +++ b/trunk/drivers/net/can/at91_can.c @@ -2,7 +2,7 @@ * at91_can.c - CAN network driver for AT91 SoC CAN controller * * (C) 2007 by Hans J. 
Koch - * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde + * (C) 2008, 2009, 2010 by Marc Kleine-Budde * * This software may be distributed under the terms of the GNU General * Public License ("GPL") version 2 as distributed in the 'COPYING' @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -41,23 +40,22 @@ #include -#define AT91_NAPI_WEIGHT 11 +#define AT91_NAPI_WEIGHT 12 /* * RX/TX Mailbox split * don't dare to touch */ -#define AT91_MB_RX_NUM 11 +#define AT91_MB_RX_NUM 12 #define AT91_MB_TX_SHIFT 2 -#define AT91_MB_RX_FIRST 1 +#define AT91_MB_RX_FIRST 0 #define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) #define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) #define AT91_MB_RX_SPLIT 8 #define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) -#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \ - ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST)) +#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT)) #define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) #define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) @@ -170,8 +168,6 @@ struct at91_priv { struct clk *clk; struct at91_can_data *pdata; - - canid_t mb0_id; }; static struct can_bittiming_const at91_bittiming_const = { @@ -224,18 +220,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, set_mb_mode_prio(priv, mb, mode, 0); } -static inline u32 at91_can_id_to_reg_mid(canid_t can_id) -{ - u32 reg_mid; - - if (can_id & CAN_EFF_FLAG) - reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; - else - reg_mid = (can_id & CAN_SFF_MASK) << 18; - - return reg_mid; -} - /* * Swtich transceiver on or off */ @@ -249,22 +233,12 @@ static void at91_setup_mailboxes(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); unsigned int i; - u32 reg_mid; /* - * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first - * mailbox is disabled. The next 11 mailboxes are used as a - * reception FIFO. The last mailbox is configured with - * overwrite option. The overwrite flag indicates a FIFO - * overflow. + * The first 12 mailboxes are used as a reception FIFO. The + * last mailbox is configured with overwrite option. The + * overwrite flag indicates a FIFO overflow. */ - reg_mid = at91_can_id_to_reg_mid(priv->mb0_id); - for (i = 0; i < AT91_MB_RX_FIRST; i++) { - set_mb_mode(priv, i, AT91_MB_MODE_DISABLED); - at91_write(priv, AT91_MID(i), reg_mid); - at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */ - } - for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) set_mb_mode(priv, i, AT91_MB_MODE_RX); set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); @@ -280,8 +254,7 @@ static void at91_setup_mailboxes(struct net_device *dev) set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); /* Reset tx and rx helper pointers */ - priv->tx_next = priv->tx_echo = 0; - priv->rx_next = AT91_MB_RX_FIRST; + priv->tx_next = priv->tx_echo = priv->rx_next = 0; } static int at91_set_bittiming(struct net_device *dev) @@ -399,7 +372,12 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); return NETDEV_TX_BUSY; } - reg_mid = at91_can_id_to_reg_mid(cf->can_id); + + if (cf->can_id & CAN_EFF_FLAG) + reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE; + else + reg_mid = (cf->can_id & CAN_SFF_MASK) << 18; + reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? 
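(An aside, not part of the diff itself: the start_xmit hunk above open-codes the mailbox-ID encoding that the removed at91_can_id_to_reg_mid() helper wrapped: an extended CAN ID is masked with CAN_EFF_MASK and tagged with AT91_MID_MIDE, while a standard ID is masked with CAN_SFF_MASK and shifted left by 18 into the MIDvA field. A small standalone sketch of that encoding; the CAN mask values are the ones from linux/can.h, and AT91_MID_MIDE is assumed here to be bit 29 of the MID register.)

#include <stdint.h>
#include <stdio.h>

/* Values as in linux/can.h. */
#define CAN_EFF_FLAG 0x80000000U /* extended frame format */
#define CAN_EFF_MASK 0x1fffffffU /* 29-bit ID */
#define CAN_SFF_MASK 0x000007ffU /* 11-bit ID */

/* Assumption for this sketch: MIDE lives in bit 29 of the MID register. */
#define AT91_MID_MIDE (1U << 29)

static uint32_t can_id_to_reg_mid(uint32_t can_id)
{
        if (can_id & CAN_EFF_FLAG)
                return (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
        return (can_id & CAN_SFF_MASK) << 18;
}

int main(void)
{
        /* standard ID 0x123 lands in MIDvA (bits 18..28) */
        printf("std 0x123      -> 0x%08x\n", can_id_to_reg_mid(0x123));
        /* extended ID keeps its 29 bits and sets MIDE */
        printf("ext 0x1abcdef0 -> 0x%08x\n",
               can_id_to_reg_mid(0x1abcdef0 | CAN_EFF_FLAG));
        return 0;
}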
AT91_MCR_MRTR : 0) | (cf->can_dlc << 16) | AT91_MCR_MTCR; @@ -561,31 +539,27 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) * * Theory of Operation: * - * 11 of the 16 mailboxes on the chip are reserved for RX. we split - * them into 2 groups. The lower group holds 7 and upper 4 mailboxes. + * 12 of the 16 mailboxes on the chip are reserved for RX. we split + * them into 2 groups. The lower group holds 8 and upper 4 mailboxes. * * Like it or not, but the chip always saves a received CAN message * into the first free mailbox it finds (starting with the * lowest). This makes it very difficult to read the messages in the * right order from the chip. This is how we work around that problem: * - * The first message goes into mb nr. 1 and issues an interrupt. All + * The first message goes into mb nr. 0 and issues an interrupt. All * rx ints are disabled in the interrupt handler and a napi poll is * scheduled. We read the mailbox, but do _not_ reenable the mb (to * receive another message). * * lower mbxs upper - * ____^______ __^__ - * / \ / \ + * ______^______ __^__ + * / \ / \ * +-+-+-+-+-+-+-+-++-+-+-+-+ - * | |x|x|x|x|x|x|x|| | | | | + * |x|x|x|x|x|x|x|x|| | | | | * +-+-+-+-+-+-+-+-++-+-+-+-+ * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail * 0 1 2 3 4 5 6 7 8 9 0 1 / box - * ^ - * | - * \ - * unused, due to chip bug * * The variable priv->rx_next points to the next mailbox to read a * message from. As long we're in the lower mailboxes we just read the @@ -616,10 +590,10 @@ static int at91_poll_rx(struct net_device *dev, int quota) "order of incoming frames cannot be guaranteed\n"); again: - for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next); - mb < AT91_MB_RX_LAST + 1 && quota > 0; + for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); + mb < AT91_MB_RX_NUM && quota > 0; reg_sr = at91_read(priv, AT91_SR), - mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) { + mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { at91_read_msg(dev, mb); /* reactivate mailboxes */ @@ -636,8 +610,8 @@ static int at91_poll_rx(struct net_device *dev, int quota) /* upper group completed, look again in lower */ if (priv->rx_next > AT91_MB_RX_LOW_LAST && - quota > 0 && mb > AT91_MB_RX_LAST) { - priv->rx_next = AT91_MB_RX_FIRST; + quota > 0 && mb >= AT91_MB_RX_NUM) { + priv->rx_next = 0; goto again; } @@ -1063,64 +1037,6 @@ static const struct net_device_ops at91_netdev_ops = { .ndo_start_xmit = at91_start_xmit, }; -static ssize_t at91_sysfs_show_mb0_id(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct at91_priv *priv = netdev_priv(to_net_dev(dev)); - - if (priv->mb0_id & CAN_EFF_FLAG) - return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id); - else - return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); -} - -static ssize_t at91_sysfs_set_mb0_id(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct net_device *ndev = to_net_dev(dev); - struct at91_priv *priv = netdev_priv(ndev); - unsigned long can_id; - ssize_t ret; - int err; - - rtnl_lock(); - - if (ndev->flags & IFF_UP) { - ret = -EBUSY; - goto out; - } - - err = strict_strtoul(buf, 0, &can_id); - if (err) { - ret = err; - goto out; - } - - if (can_id & CAN_EFF_FLAG) - can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; - else - can_id &= CAN_SFF_MASK; - - priv->mb0_id = can_id; - ret = count; - - out: - rtnl_unlock(); - return ret; -} - -static DEVICE_ATTR(mb0_id, S_IWUGO | S_IRUGO, - at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); - -static 
struct attribute *at91_sysfs_attrs[] = { - &dev_attr_mb0_id.attr, - NULL, -}; - -static struct attribute_group at91_sysfs_attr_group = { - .attrs = at91_sysfs_attrs, -}; - static int __devinit at91_can_probe(struct platform_device *pdev) { struct net_device *dev; @@ -1166,7 +1082,6 @@ static int __devinit at91_can_probe(struct platform_device *pdev) dev->netdev_ops = &at91_netdev_ops; dev->irq = irq; dev->flags |= IFF_ECHO; - dev->sysfs_groups[0] = &at91_sysfs_attr_group; priv = netdev_priv(dev); priv->can.clock.freq = clk_get_rate(clk); @@ -1178,7 +1093,6 @@ static int __devinit at91_can_probe(struct platform_device *pdev) priv->dev = dev; priv->clk = clk; priv->pdata = pdev->dev.platform_data; - priv->mb0_id = 0x7ff; netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); diff --git a/trunk/drivers/net/can/softing/Kconfig b/trunk/drivers/net/can/softing/Kconfig deleted file mode 100644 index 92bd6bdde5e3..000000000000 --- a/trunk/drivers/net/can/softing/Kconfig +++ /dev/null @@ -1,30 +0,0 @@ -config CAN_SOFTING - tristate "Softing Gmbh CAN generic support" - depends on CAN_DEV - ---help--- - Support for CAN cards from Softing Gmbh & some cards - from Vector Gmbh. - Softing Gmbh CAN cards come with 1 or 2 physical busses. - Those cards typically use Dual Port RAM to communicate - with the host CPU. The interface is then identical for PCI - and PCMCIA cards. This driver operates on a platform device, - which has been created by softing_cs or softing_pci driver. - Warning: - The API of the card does not allow fine control per bus, but - controls the 2 busses on the card together. - As such, some actions (start/stop/busoff recovery) on 1 bus - must bring down the other bus too temporarily. - -config CAN_SOFTING_CS - tristate "Softing Gmbh CAN pcmcia cards" - depends on PCMCIA - select CAN_SOFTING - ---help--- - Support for PCMCIA cards from Softing Gmbh & some cards - from Vector Gmbh. - You need firmware for these, which you can get at - http://developer.berlios.de/projects/socketcan/ - This version of the driver is written against - firmware version 4.6 (softing-fw-4.6-binaries.tar.gz) - In order to use the card as CAN device, you need the Softing generic - support too. diff --git a/trunk/drivers/net/can/softing/Makefile b/trunk/drivers/net/can/softing/Makefile deleted file mode 100644 index c5e5016c742e..000000000000 --- a/trunk/drivers/net/can/softing/Makefile +++ /dev/null @@ -1,6 +0,0 @@ - -softing-y := softing_main.o softing_fw.o -obj-$(CONFIG_CAN_SOFTING) += softing.o -obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/trunk/drivers/net/can/softing/softing.h b/trunk/drivers/net/can/softing/softing.h deleted file mode 100644 index 7ec9f4db3d52..000000000000 --- a/trunk/drivers/net/can/softing/softing.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - * softing common interfaces - * - * by Kurt Van Dijck, 2008-2010 - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "softing_platform.h" - -struct softing; - -struct softing_priv { - struct can_priv can; /* must be the first member! 
*/ - struct net_device *netdev; - struct softing *card; - struct { - int pending; - /* variables wich hold the circular buffer */ - int echo_put; - int echo_get; - } tx; - struct can_bittiming_const btr_const; - int index; - uint8_t output; - uint16_t chip; -}; -#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev)) - -struct softing { - const struct softing_platform_data *pdat; - struct platform_device *pdev; - struct net_device *net[2]; - spinlock_t spin; /* protect this structure & DPRAM access */ - ktime_t ts_ref; - ktime_t ts_overflow; /* timestamp overflow value, in ktime */ - - struct { - /* indication of firmware status */ - int up; - /* protection of the 'up' variable */ - struct mutex lock; - } fw; - struct { - int nr; - int requested; - int svc_count; - unsigned int dpram_position; - } irq; - struct { - int pending; - int last_bus; - /* - * keep the bus that last tx'd a message, - * in order to let every netdev queue resume - */ - } tx; - __iomem uint8_t *dpram; - unsigned long dpram_phys; - unsigned long dpram_size; - struct { - uint16_t fw_version, hw_version, license, serial; - uint16_t chip[2]; - unsigned int freq; /* remote cpu's operating frequency */ - } id; -}; - -extern int softing_default_output(struct net_device *netdev); - -extern ktime_t softing_raw2ktime(struct softing *card, u32 raw); - -extern int softing_chip_poweron(struct softing *card); - -extern int softing_bootloader_command(struct softing *card, int16_t cmd, - const char *msg); - -/* Load firmware after reset */ -extern int softing_load_fw(const char *file, struct softing *card, - __iomem uint8_t *virt, unsigned int size, int offset); - -/* Load final application firmware after bootloader */ -extern int softing_load_app_fw(const char *file, struct softing *card); - -/* - * enable or disable irq - * only called with fw.lock locked - */ -extern int softing_enable_irq(struct softing *card, int enable); - -/* start/stop 1 bus on card */ -extern int softing_startstop(struct net_device *netdev, int up); - -/* netif_rx() */ -extern int softing_netdev_rx(struct net_device *netdev, - const struct can_frame *msg, ktime_t ktime); - -/* SOFTING DPRAM mappings */ -#define DPRAM_RX 0x0000 - #define DPRAM_RX_SIZE 32 - #define DPRAM_RX_CNT 16 -#define DPRAM_RX_RD 0x0201 /* uint8_t */ -#define DPRAM_RX_WR 0x0205 /* uint8_t */ -#define DPRAM_RX_LOST 0x0207 /* uint8_t */ - -#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */ -#define DPRAM_FCT_RESULT 0x0328 /* int16_t */ -#define DPRAM_FCT_HOST 0x032b /* uint16_t */ - -#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */ -#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */ -#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */ -#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */ -#define DPRAM_RESET 0x0341 /* uint16_t */ -#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */ -#define DPRAM_RESET_TIME 0x034d /* uint16_t */ -#define DPRAM_TIME 0x0350 /* uint64_t */ -#define DPRAM_WR_START 0x0358 /* uint8_t */ -#define DPRAM_WR_END 0x0359 /* uint8_t */ -#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */ -#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */ -#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */ -#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */ -#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */ - -#define DPRAM_TX 0x0400 /* uint16_t */ - #define DPRAM_TX_SIZE 16 - #define DPRAM_TX_CNT 32 -#define DPRAM_TX_RD 0x0601 /* uint8_t */ -#define DPRAM_TX_WR 0x0605 /* uint8_t */ - -#define DPRAM_COMMAND 0x07e0 /* uint16_t */ -#define DPRAM_RECEIPT 0x07f0 /* uint16_t */ 
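(An aside, not part of the diff itself: the DPRAM map above is enough to locate any TX slot: slot i starts at DPRAM_TX + DPRAM_TX_SIZE * i, and the write index wraps at DPRAM_TX_CNT, which is exactly the arithmetic softing_netdev_start_xmit uses further down in softing_main.c. A small standalone check of those offsets:)

#include <stdint.h>
#include <stdio.h>

/* Constants copied from the DPRAM map above. */
#define DPRAM_TX      0x0400
#define DPRAM_TX_SIZE 16
#define DPRAM_TX_CNT  32

/* Byte offset of TX slot 'idx' inside the dual-port RAM window. */
static unsigned int tx_slot_offset(unsigned int idx)
{
        return DPRAM_TX + DPRAM_TX_SIZE * idx;
}

/* Advance the FIFO write index with the same wraparound the driver uses. */
static uint8_t tx_advance(uint8_t fifo_wr)
{
        if (++fifo_wr >= DPRAM_TX_CNT)
                fifo_wr = 0;
        return fifo_wr;
}

int main(void)
{
        printf("slot 0  starts at 0x%04x\n", tx_slot_offset(0));  /* 0x0400 */
        printf("slot 31 starts at 0x%04x\n", tx_slot_offset(31)); /* 0x05f0 */
        printf("index after 31 -> %u\n", tx_advance(31));         /* wraps to 0 */
        return 0;
}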
-#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */ -#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */ - -#define DPRAM_V2_RESET 0x0e00 /* uint8_t */ -#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */ - -#define TXMAX (DPRAM_TX_CNT - 1) - -/* DPRAM return codes */ -#define RES_NONE 0 -#define RES_OK 1 -#define RES_NOK 2 -#define RES_UNKNOWN 3 -/* DPRAM flags */ -#define CMD_TX 0x01 -#define CMD_ACK 0x02 -#define CMD_XTD 0x04 -#define CMD_RTR 0x08 -#define CMD_ERR 0x10 -#define CMD_BUS2 0x80 - -/* returned fifo entry bus state masks */ -#define SF_MASK_BUSOFF 0x80 -#define SF_MASK_EPASSIVE 0x60 - -/* bus states */ -#define STATE_BUSOFF 2 -#define STATE_EPASSIVE 1 -#define STATE_EACTIVE 0 diff --git a/trunk/drivers/net/can/softing/softing_cs.c b/trunk/drivers/net/can/softing/softing_cs.c deleted file mode 100644 index 300fe75dd1a7..000000000000 --- a/trunk/drivers/net/can/softing/softing_cs.c +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Copyright (C) 2008-2010 - * - * - Kurt Van Dijck, EIA Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include - -#include -#include - -#include "softing_platform.h" - -static int softingcs_index; -static spinlock_t softingcs_index_lock; - -static int softingcs_reset(struct platform_device *pdev, int v); -static int softingcs_enable_irq(struct platform_device *pdev, int v); - -/* - * platform_data descriptions - */ -#define MHZ (1000*1000) -static const struct softing_platform_data softingcs_platform_data[] = { -{ - .name = "CANcard", - .manf = 0x0168, .prod = 0x001, - .generation = 1, - .nbus = 2, - .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, - .dpram_size = 0x0800, - .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, - .reset = softingcs_reset, - .enable_irq = softingcs_enable_irq, -}, { - .name = "CANcard-NEC", - .manf = 0x0168, .prod = 0x002, - .generation = 1, - .nbus = 2, - .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, - .dpram_size = 0x0800, - .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, - .reset = softingcs_reset, - .enable_irq = softingcs_enable_irq, -}, { - .name = "CANcard-SJA", - .manf = 0x0168, .prod = 0x004, - .generation = 1, - .nbus = 2, - .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, - .dpram_size = 0x0800, - .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, - .reset = softingcs_reset, - .enable_irq = softingcs_enable_irq, -}, { - .name = "CANcard-2", - .manf = 0x0168, .prod = 0x005, - .generation = 2, - .nbus = 2, - .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, - .dpram_size = 0x1000, - .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, - .app = {0x0010, 
0x0d0000, fw_dir "cancrd2.bin",}, - .reset = softingcs_reset, - .enable_irq = NULL, -}, { - .name = "Vector-CANcard", - .manf = 0x0168, .prod = 0x081, - .generation = 1, - .nbus = 2, - .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, - .dpram_size = 0x0800, - .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, - .reset = softingcs_reset, - .enable_irq = softingcs_enable_irq, -}, { - .name = "Vector-CANcard-SJA", - .manf = 0x0168, .prod = 0x084, - .generation = 1, - .nbus = 2, - .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, - .dpram_size = 0x0800, - .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, - .reset = softingcs_reset, - .enable_irq = softingcs_enable_irq, -}, { - .name = "Vector-CANcard-2", - .manf = 0x0168, .prod = 0x085, - .generation = 2, - .nbus = 2, - .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, - .dpram_size = 0x1000, - .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, - .reset = softingcs_reset, - .enable_irq = NULL, -}, { - .name = "EDICcard-NEC", - .manf = 0x0168, .prod = 0x102, - .generation = 1, - .nbus = 2, - .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, - .dpram_size = 0x0800, - .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, - .reset = softingcs_reset, - .enable_irq = softingcs_enable_irq, -}, { - .name = "EDICcard-2", - .manf = 0x0168, .prod = 0x105, - .generation = 2, - .nbus = 2, - .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, - .dpram_size = 0x1000, - .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, - .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, - .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, - .reset = softingcs_reset, - .enable_irq = NULL, -}, { - 0, 0, -}, -}; - -MODULE_FIRMWARE(fw_dir "bcard.bin"); -MODULE_FIRMWARE(fw_dir "ldcard.bin"); -MODULE_FIRMWARE(fw_dir "cancard.bin"); -MODULE_FIRMWARE(fw_dir "cansja.bin"); - -MODULE_FIRMWARE(fw_dir "bcard2.bin"); -MODULE_FIRMWARE(fw_dir "ldcard2.bin"); -MODULE_FIRMWARE(fw_dir "cancrd2.bin"); - -static __devinit const struct softing_platform_data -*softingcs_find_platform_data(unsigned int manf, unsigned int prod) -{ - const struct softing_platform_data *lp; - - for (lp = softingcs_platform_data; lp->manf; ++lp) { - if ((lp->manf == manf) && (lp->prod == prod)) - return lp; - } - return NULL; -} - -/* - * platformdata callbacks - */ -static int softingcs_reset(struct platform_device *pdev, int v) -{ - struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); - - dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20); - return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20); -} - -static int softingcs_enable_irq(struct platform_device *pdev, int v) -{ - struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); - - dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0); - return pcmcia_write_config_byte(pcmcia, 0, v ? 
0x60 : 0); -} - -/* - * pcmcia check - */ -static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia, - void *priv_data) -{ - struct softing_platform_data *pdat = priv_data; - struct resource *pres; - int memspeed = 0; - - WARN_ON(!pdat); - pres = pcmcia->resource[PCMCIA_IOMEM_0]; - if (resource_size(pres) < 0x1000) - return -ERANGE; - - pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE; - if (pdat->generation < 2) { - pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8; - memspeed = 3; - } else { - pres->flags |= WIN_DATA_WIDTH_16; - } - return pcmcia_request_window(pcmcia, pres, memspeed); -} - -static __devexit void softingcs_remove(struct pcmcia_device *pcmcia) -{ - struct platform_device *pdev = pcmcia->priv; - - /* free bits */ - platform_device_unregister(pdev); - /* release pcmcia stuff */ - pcmcia_disable_device(pcmcia); -} - -/* - * platform_device wrapper - * pdev->resource has 2 entries: io & irq - */ -static void softingcs_pdev_release(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - kfree(pdev); -} - -static __devinit int softingcs_probe(struct pcmcia_device *pcmcia) -{ - int ret; - struct platform_device *pdev; - const struct softing_platform_data *pdat; - struct resource *pres; - struct dev { - struct platform_device pdev; - struct resource res[2]; - } *dev; - - /* find matching platform_data */ - pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id); - if (!pdat) - return -ENOTTY; - - /* setup pcmcia device */ - pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM | - CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; - ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat); - if (ret) - goto pcmcia_failed; - - ret = pcmcia_enable_device(pcmcia); - if (ret < 0) - goto pcmcia_failed; - - pres = pcmcia->resource[PCMCIA_IOMEM_0]; - if (!pres) { - ret = -EBADF; - goto pcmcia_bad; - } - - /* create softing platform device */ - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { - ret = -ENOMEM; - goto mem_failed; - } - dev->pdev.resource = dev->res; - dev->pdev.num_resources = ARRAY_SIZE(dev->res); - dev->pdev.dev.release = softingcs_pdev_release; - - pdev = &dev->pdev; - pdev->dev.platform_data = (void *)pdat; - pdev->dev.parent = &pcmcia->dev; - pcmcia->priv = pdev; - - /* platform device resources */ - pdev->resource[0].flags = IORESOURCE_MEM; - pdev->resource[0].start = pres->start; - pdev->resource[0].end = pres->end; - - pdev->resource[1].flags = IORESOURCE_IRQ; - pdev->resource[1].start = pcmcia->irq; - pdev->resource[1].end = pdev->resource[1].start; - - /* platform device setup */ - spin_lock(&softingcs_index_lock); - pdev->id = softingcs_index++; - spin_unlock(&softingcs_index_lock); - pdev->name = "softing"; - dev_set_name(&pdev->dev, "softingcs.%i", pdev->id); - ret = platform_device_register(pdev); - if (ret < 0) - goto platform_failed; - - dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev)); - return 0; - -platform_failed: - kfree(dev); -mem_failed: -pcmcia_bad: -pcmcia_failed: - pcmcia_disable_device(pcmcia); - pcmcia->priv = NULL; - return ret ?: -ENODEV; -} - -static /*const*/ struct pcmcia_device_id softingcs_ids[] = { - /* softing */ - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001), - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004), - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005), - /* vector, manufacturer? 
*/ - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081), - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084), - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085), - /* EDIC */ - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102), - PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105), - PCMCIA_DEVICE_NULL, -}; - -MODULE_DEVICE_TABLE(pcmcia, softingcs_ids); - -static struct pcmcia_driver softingcs_driver = { - .owner = THIS_MODULE, - .name = "softingcs", - .id_table = softingcs_ids, - .probe = softingcs_probe, - .remove = __devexit_p(softingcs_remove), -}; - -static int __init softingcs_start(void) -{ - spin_lock_init(&softingcs_index_lock); - return pcmcia_register_driver(&softingcs_driver); -} - -static void __exit softingcs_stop(void) -{ - pcmcia_unregister_driver(&softingcs_driver); -} - -module_init(softingcs_start); -module_exit(softingcs_stop); - -MODULE_DESCRIPTION("softing CANcard driver" - ", links PCMCIA card to softing driver"); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/can/softing/softing_fw.c b/trunk/drivers/net/can/softing/softing_fw.c deleted file mode 100644 index b520784fb197..000000000000 --- a/trunk/drivers/net/can/softing/softing_fw.c +++ /dev/null @@ -1,691 +0,0 @@ -/* - * Copyright (C) 2008-2010 - * - * - Kurt Van Dijck, EIA Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include - -#include "softing.h" - -/* - * low level DPRAM command. - * Make sure that card->dpram[DPRAM_FCT_HOST] is preset - */ -static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector, - const char *msg) -{ - int ret; - unsigned long stamp; - - iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]); - iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]); - iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]); - /* be sure to flush this to the card */ - wmb(); - stamp = jiffies + 1 * HZ; - /* wait for card */ - do { - /* DPRAM_FCT_HOST is _not_ aligned */ - ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) + - (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8); - /* don't have any cached variables */ - rmb(); - if (ret == RES_OK) - /* read return-value now */ - return ioread16(&card->dpram[DPRAM_FCT_RESULT]); - - if ((ret != vector) || time_after(jiffies, stamp)) - break; - /* process context => relax */ - usleep_range(500, 10000); - } while (1); - - ret = (ret == RES_NONE) ? 
-ETIMEDOUT : -ECANCELED; - dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret); - return ret; -} - -static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg) -{ - int ret; - - ret = _softing_fct_cmd(card, cmd, 0, msg); - if (ret > 0) { - dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret); - ret = -EIO; - } - return ret; -} - -int softing_bootloader_command(struct softing *card, int16_t cmd, - const char *msg) -{ - int ret; - unsigned long stamp; - - iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]); - iowrite16(cmd, &card->dpram[DPRAM_COMMAND]); - /* be sure to flush this to the card */ - wmb(); - stamp = jiffies + 3 * HZ; - /* wait for card */ - do { - ret = ioread16(&card->dpram[DPRAM_RECEIPT]); - /* don't have any cached variables */ - rmb(); - if (ret == RES_OK) - return 0; - if (time_after(jiffies, stamp)) - break; - /* process context => relax */ - usleep_range(500, 10000); - } while (!signal_pending(current)); - - ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED; - dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret); - return ret; -} - -static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr, - uint16_t *plen, const uint8_t **pdat) -{ - uint16_t checksum[2]; - const uint8_t *mem; - const uint8_t *end; - - /* - * firmware records are a binary, unaligned stream composed of: - * uint16_t type; - * uint32_t addr; - * uint16_t len; - * uint8_t dat[len]; - * uint16_t checksum; - * all values in little endian. - * We could define a struct for this, with __attribute__((packed)), - * but would that solve the alignment in _all_ cases (cfr. the - * struct itself may be an odd address)? - * - * I chose to use leXX_to_cpup() since this solves both - * endianness & alignment. - */ - mem = *pmem; - *ptype = le16_to_cpup((void *)&mem[0]); - *paddr = le32_to_cpup((void *)&mem[2]); - *plen = le16_to_cpup((void *)&mem[6]); - *pdat = &mem[8]; - /* verify checksum */ - end = &mem[8 + *plen]; - checksum[0] = le16_to_cpup((void *)end); - for (checksum[1] = 0; mem < end; ++mem) - checksum[1] += *mem; - if (checksum[0] != checksum[1]) - return -EINVAL; - /* increment */ - *pmem += 10 + *plen; - return 0; -} - -int softing_load_fw(const char *file, struct softing *card, - __iomem uint8_t *dpram, unsigned int size, int offset) -{ - const struct firmware *fw; - int ret; - const uint8_t *mem, *end, *dat; - uint16_t type, len; - uint32_t addr; - uint8_t *buf = NULL; - int buflen = 0; - int8_t type_end = 0; - - ret = request_firmware(&fw, file, &card->pdev->dev); - if (ret < 0) - return ret; - dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes" - ", offset %c0x%04x\n", - card->pdat->name, file, (unsigned int)fw->size, - (offset >= 0) ? 
'+' : '-', (unsigned int)abs(offset)); - /* parse the firmware */ - mem = fw->data; - end = &mem[fw->size]; - /* look for header record */ - ret = fw_parse(&mem, &type, &addr, &len, &dat); - if (ret < 0) - goto failed; - if (type != 0xffff) - goto failed; - if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) { - ret = -EINVAL; - goto failed; - } - /* ok, we had a header */ - while (mem < end) { - ret = fw_parse(&mem, &type, &addr, &len, &dat); - if (ret < 0) - goto failed; - if (type == 3) { - /* start address, not used here */ - continue; - } else if (type == 1) { - /* eof */ - type_end = 1; - break; - } else if (type != 0) { - ret = -EINVAL; - goto failed; - } - - if ((addr + len + offset) > size) - goto failed; - memcpy_toio(&dpram[addr + offset], dat, len); - /* be sure to flush caches from IO space */ - mb(); - if (len > buflen) { - /* align buflen */ - buflen = (len + (1024-1)) & ~(1024-1); - buf = krealloc(buf, buflen, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto failed; - } - } - /* verify record data */ - memcpy_fromio(buf, &dpram[addr + offset], len); - if (memcmp(buf, dat, len)) { - /* is not ok */ - dev_alert(&card->pdev->dev, "DPRAM readback failed\n"); - ret = -EIO; - goto failed; - } - } - if (!type_end) - /* no end record seen */ - goto failed; - ret = 0; -failed: - kfree(buf); - release_firmware(fw); - if (ret < 0) - dev_info(&card->pdev->dev, "firmware %s failed\n", file); - return ret; -} - -int softing_load_app_fw(const char *file, struct softing *card) -{ - const struct firmware *fw; - const uint8_t *mem, *end, *dat; - int ret, j; - uint16_t type, len; - uint32_t addr, start_addr = 0; - unsigned int sum, rx_sum; - int8_t type_end = 0, type_entrypoint = 0; - - ret = request_firmware(&fw, file, &card->pdev->dev); - if (ret) { - dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n", - file, ret); - return ret; - } - dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n", - file, (unsigned long)fw->size); - /* parse the firmware */ - mem = fw->data; - end = &mem[fw->size]; - /* look for header record */ - ret = fw_parse(&mem, &type, &addr, &len, &dat); - if (ret) - goto failed; - ret = -EINVAL; - if (type != 0xffff) { - dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n", - type); - goto failed; - } - if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) { - dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n", - len, dat); - goto failed; - } - /* ok, we had a header */ - while (mem < end) { - ret = fw_parse(&mem, &type, &addr, &len, &dat); - if (ret) - goto failed; - - if (type == 3) { - /* start address */ - start_addr = addr; - type_entrypoint = 1; - continue; - } else if (type == 1) { - /* eof */ - type_end = 1; - break; - } else if (type != 0) { - dev_alert(&card->pdev->dev, - "unknown record type 0x%04x\n", type); - ret = -EINVAL; - goto failed; - } - - /* regualar data */ - for (sum = 0, j = 0; j < len; ++j) - sum += dat[j]; - /* work in 16bit (target) */ - sum &= 0xffff; - - memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len); - iowrite32(card->pdat->app.offs + card->pdat->app.addr, - &card->dpram[DPRAM_COMMAND + 2]); - iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]); - iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]); - iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]); - ret = softing_bootloader_command(card, 1, "loading app."); - if (ret < 0) - goto failed; - /* verify checksum */ - rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]); - if (rx_sum != sum) { - dev_alert(&card->pdev->dev, "SRAM seems to 
be damaged" - ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum); - ret = -EIO; - goto failed; - } - } - if (!type_end || !type_entrypoint) - goto failed; - /* start application in card */ - iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]); - iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]); - ret = softing_bootloader_command(card, 3, "start app."); - if (ret < 0) - goto failed; - ret = 0; -failed: - release_firmware(fw); - if (ret < 0) - dev_info(&card->pdev->dev, "firmware %s failed\n", file); - return ret; -} - -static int softing_reset_chip(struct softing *card) -{ - int ret; - - do { - /* reset chip */ - iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]); - iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]); - iowrite8(1, &card->dpram[DPRAM_RESET]); - iowrite8(0, &card->dpram[DPRAM_RESET+1]); - - ret = softing_fct_cmd(card, 0, "reset_can"); - if (!ret) - break; - if (signal_pending(current)) - /* don't wait any longer */ - break; - } while (1); - card->tx.pending = 0; - return ret; -} - -int softing_chip_poweron(struct softing *card) -{ - int ret; - /* sync */ - ret = _softing_fct_cmd(card, 99, 0x55, "sync-a"); - if (ret < 0) - goto failed; - - ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b"); - if (ret < 0) - goto failed; - - ret = softing_reset_chip(card); - if (ret < 0) - goto failed; - /* get_serial */ - ret = softing_fct_cmd(card, 43, "get_serial_number"); - if (ret < 0) - goto failed; - card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]); - /* get_version */ - ret = softing_fct_cmd(card, 12, "get_version"); - if (ret < 0) - goto failed; - card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]); - card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]); - card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]); - card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]); - card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]); - return 0; -failed: - return ret; -} - -static void softing_initialize_timestamp(struct softing *card) -{ - uint64_t ovf; - - card->ts_ref = ktime_get(); - - /* 16MHz is the reference */ - ovf = 0x100000000ULL * 16; - do_div(ovf, card->pdat->freq ?: 16); - - card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf); -} - -ktime_t softing_raw2ktime(struct softing *card, u32 raw) -{ - uint64_t rawl; - ktime_t now, real_offset; - ktime_t target; - ktime_t tmp; - - now = ktime_get(); - real_offset = ktime_sub(ktime_get_real(), now); - - /* find nsec from card */ - rawl = raw * 16; - do_div(rawl, card->pdat->freq ?: 16); - target = ktime_add_us(card->ts_ref, rawl); - /* test for overflows */ - tmp = ktime_add(target, card->ts_overflow); - while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) { - card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow); - target = tmp; - tmp = ktime_add(target, card->ts_overflow); - } - return ktime_add(target, real_offset); -} - -static inline int softing_error_reporting(struct net_device *netdev) -{ - struct softing_priv *priv = netdev_priv(netdev); - - return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) - ? 
1 : 0; -} - -int softing_startstop(struct net_device *dev, int up) -{ - int ret; - struct softing *card; - struct softing_priv *priv; - struct net_device *netdev; - int bus_bitmask_start; - int j, error_reporting; - struct can_frame msg; - const struct can_bittiming *bt; - - priv = netdev_priv(dev); - card = priv->card; - - if (!card->fw.up) - return -EIO; - - ret = mutex_lock_interruptible(&card->fw.lock); - if (ret) - return ret; - - bus_bitmask_start = 0; - if (dev && up) - /* prepare to start this bus as well */ - bus_bitmask_start |= (1 << priv->index); - /* bring netdevs down */ - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - netdev = card->net[j]; - if (!netdev) - continue; - priv = netdev_priv(netdev); - - if (dev != netdev) - netif_stop_queue(netdev); - - if (netif_running(netdev)) { - if (dev != netdev) - bus_bitmask_start |= (1 << j); - priv->tx.pending = 0; - priv->tx.echo_put = 0; - priv->tx.echo_get = 0; - /* - * this bus' may just have called open_candev() - * which is rather stupid to call close_candev() - * already - * but we may come here from busoff recovery too - * in which case the echo_skb _needs_ flushing too. - * just be sure to call open_candev() again - */ - close_candev(netdev); - } - priv->can.state = CAN_STATE_STOPPED; - } - card->tx.pending = 0; - - softing_enable_irq(card, 0); - ret = softing_reset_chip(card); - if (ret) - goto failed; - if (!bus_bitmask_start) - /* no busses to be brought up */ - goto card_done; - - if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2) - && (softing_error_reporting(card->net[0]) - != softing_error_reporting(card->net[1]))) { - dev_alert(&card->pdev->dev, - "err_reporting flag differs for busses\n"); - goto invalid; - } - error_reporting = 0; - if (bus_bitmask_start & 1) { - netdev = card->net[0]; - priv = netdev_priv(netdev); - error_reporting += softing_error_reporting(netdev); - /* init chip 1 */ - bt = &priv->can.bittiming; - iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); - iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); - iowrite16(bt->phase_seg1 + bt->prop_seg, - &card->dpram[DPRAM_FCT_PARAM + 6]); - iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); - iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 : 0, - &card->dpram[DPRAM_FCT_PARAM + 10]); - ret = softing_fct_cmd(card, 1, "initialize_chip[0]"); - if (ret < 0) - goto failed; - /* set mode */ - iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); - iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); - ret = softing_fct_cmd(card, 3, "set_mode[0]"); - if (ret < 0) - goto failed; - /* set filter */ - /* 11bit id & mask */ - iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); - iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); - /* 29bit id.lo & mask.lo & id.hi & mask.hi */ - iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); - iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); - iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); - iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); - ret = softing_fct_cmd(card, 7, "set_filter[0]"); - if (ret < 0) - goto failed; - /* set output control */ - iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); - ret = softing_fct_cmd(card, 5, "set_output[0]"); - if (ret < 0) - goto failed; - } - if (bus_bitmask_start & 2) { - netdev = card->net[1]; - priv = netdev_priv(netdev); - error_reporting += softing_error_reporting(netdev); - /* init chip2 */ - bt = &priv->can.bittiming; - iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); - iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); - iowrite16(bt->phase_seg1 + bt->prop_seg, - &card->dpram[DPRAM_FCT_PARAM + 6]); - iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); - iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0, - &card->dpram[DPRAM_FCT_PARAM + 10]); - ret = softing_fct_cmd(card, 2, "initialize_chip[1]"); - if (ret < 0) - goto failed; - /* set mode2 */ - iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); - iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); - ret = softing_fct_cmd(card, 4, "set_mode[1]"); - if (ret < 0) - goto failed; - /* set filter2 */ - /* 11bit id & mask */ - iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); - iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); - /* 29bit id.lo & mask.lo & id.hi & mask.hi */ - iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); - iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); - iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); - iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); - ret = softing_fct_cmd(card, 8, "set_filter[1]"); - if (ret < 0) - goto failed; - /* set output control2 */ - iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); - ret = softing_fct_cmd(card, 6, "set_output[1]"); - if (ret < 0) - goto failed; - } - /* enable_error_frame */ - /* - * Error reporting is switched off at the moment since - * the receiving of them is not yet 100% verified - * This should be enabled sooner or later - * - if (error_reporting) { - ret = softing_fct_cmd(card, 51, "enable_error_frame"); - if (ret < 0) - goto failed; - } - */ - /* initialize interface */ - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]); - iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]); - ret = softing_fct_cmd(card, 17, "initialize_interface"); - if (ret < 0) - goto failed; - /* enable_fifo */ - ret = softing_fct_cmd(card, 36, "enable_fifo"); - if 
(ret < 0) - goto failed; - /* enable fifo tx ack */ - ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]"); - if (ret < 0) - goto failed; - /* enable fifo tx ack2 */ - ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]"); - if (ret < 0) - goto failed; - /* start_chip */ - ret = softing_fct_cmd(card, 11, "start_chip"); - if (ret < 0) - goto failed; - iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]); - iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]); - if (card->pdat->generation < 2) { - iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); - /* flush the DPRAM caches */ - wmb(); - } - - softing_initialize_timestamp(card); - - /* - * do socketcan notifications/status changes - * from here, no errors should occur, or the failed: part - * must be reviewed - */ - memset(&msg, 0, sizeof(msg)); - msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; - msg.can_dlc = CAN_ERR_DLC; - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - if (!(bus_bitmask_start & (1 << j))) - continue; - netdev = card->net[j]; - if (!netdev) - continue; - priv = netdev_priv(netdev); - priv->can.state = CAN_STATE_ERROR_ACTIVE; - open_candev(netdev); - if (dev != netdev) { - /* notify other busses on the restart */ - softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); - ++priv->can.can_stats.restarts; - } - netif_wake_queue(netdev); - } - - /* enable interrupts */ - ret = softing_enable_irq(card, 1); - if (ret) - goto failed; -card_done: - mutex_unlock(&card->fw.lock); - return 0; -invalid: - ret = -EINVAL; -failed: - softing_enable_irq(card, 0); - softing_reset_chip(card); - mutex_unlock(&card->fw.lock); - /* bring all other interfaces down */ - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - netdev = card->net[j]; - if (!netdev) - continue; - dev_close(netdev); - } - return ret; -} - -int softing_default_output(struct net_device *netdev) -{ - struct softing_priv *priv = netdev_priv(netdev); - struct softing *card = priv->card; - - switch (priv->chip) { - case 1000: - return (card->pdat->generation < 2) ? 0xfb : 0xfa; - case 5: - return 0x60; - default: - return 0x40; - } -} diff --git a/trunk/drivers/net/can/softing/softing_main.c b/trunk/drivers/net/can/softing/softing_main.c deleted file mode 100644 index 5157e15e96eb..000000000000 --- a/trunk/drivers/net/can/softing/softing_main.c +++ /dev/null @@ -1,893 +0,0 @@ -/* - * Copyright (C) 2008-2010 - * - * - Kurt Van Dijck, EIA Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include - -#include "softing.h" - -#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1) - -/* - * test is a specific CAN netdev - * is online (ie. 
up 'n running, not sleeping, not busoff - */ -static inline int canif_is_active(struct net_device *netdev) -{ - struct can_priv *can = netdev_priv(netdev); - - if (!netif_running(netdev)) - return 0; - return (can->state <= CAN_STATE_ERROR_PASSIVE); -} - -/* reset DPRAM */ -static inline void softing_set_reset_dpram(struct softing *card) -{ - if (card->pdat->generation >= 2) { - spin_lock_bh(&card->spin); - iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1, - &card->dpram[DPRAM_V2_RESET]); - spin_unlock_bh(&card->spin); - } -} - -static inline void softing_clr_reset_dpram(struct softing *card) -{ - if (card->pdat->generation >= 2) { - spin_lock_bh(&card->spin); - iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1, - &card->dpram[DPRAM_V2_RESET]); - spin_unlock_bh(&card->spin); - } -} - -/* trigger the tx queue-ing */ -static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct softing_priv *priv = netdev_priv(dev); - struct softing *card = priv->card; - int ret; - uint8_t *ptr; - uint8_t fifo_wr, fifo_rd; - struct can_frame *cf = (struct can_frame *)skb->data; - uint8_t buf[DPRAM_TX_SIZE]; - - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - - spin_lock(&card->spin); - - ret = NETDEV_TX_BUSY; - if (!card->fw.up || - (card->tx.pending >= TXMAX) || - (priv->tx.pending >= TX_ECHO_SKB_MAX)) - goto xmit_done; - fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]); - fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]); - if (fifo_wr == fifo_rd) - /* fifo full */ - goto xmit_done; - memset(buf, 0, sizeof(buf)); - ptr = buf; - *ptr = CMD_TX; - if (cf->can_id & CAN_RTR_FLAG) - *ptr |= CMD_RTR; - if (cf->can_id & CAN_EFF_FLAG) - *ptr |= CMD_XTD; - if (priv->index) - *ptr |= CMD_BUS2; - ++ptr; - *ptr++ = cf->can_dlc; - *ptr++ = (cf->can_id >> 0); - *ptr++ = (cf->can_id >> 8); - if (cf->can_id & CAN_EFF_FLAG) { - *ptr++ = (cf->can_id >> 16); - *ptr++ = (cf->can_id >> 24); - } else { - /* increment 1, not 2 as you might think */ - ptr += 1; - } - if (!(cf->can_id & CAN_RTR_FLAG)) - memcpy(ptr, &cf->data[0], cf->can_dlc); - memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr], - buf, DPRAM_TX_SIZE); - if (++fifo_wr >= DPRAM_TX_CNT) - fifo_wr = 0; - iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]); - card->tx.last_bus = priv->index; - ++card->tx.pending; - ++priv->tx.pending; - can_put_echo_skb(skb, dev, priv->tx.echo_put); - ++priv->tx.echo_put; - if (priv->tx.echo_put >= TX_ECHO_SKB_MAX) - priv->tx.echo_put = 0; - /* can_put_echo_skb() saves the skb, safe to return TX_OK */ - ret = NETDEV_TX_OK; -xmit_done: - spin_unlock(&card->spin); - if (card->tx.pending >= TXMAX) { - int j; - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - if (card->net[j]) - netif_stop_queue(card->net[j]); - } - } - if (ret != NETDEV_TX_OK) - netif_stop_queue(dev); - - return ret; -} - -/* - * shortcut for skb delivery - */ -int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg, - ktime_t ktime) -{ - struct sk_buff *skb; - struct can_frame *cf; - - skb = alloc_can_skb(netdev, &cf); - if (!skb) - return -ENOMEM; - memcpy(cf, msg, sizeof(*msg)); - skb->tstamp = ktime; - return netif_rx(skb); -} - -/* - * softing_handle_1 - * pop 1 entry from the DPRAM queue, and process - */ -static int softing_handle_1(struct softing *card) -{ - struct net_device *netdev; - struct softing_priv *priv; - ktime_t ktime; - struct can_frame msg; - int cnt = 0, lost_msg; - uint8_t fifo_rd, fifo_wr, cmd; - uint8_t *ptr; - uint32_t tmp_u32; - uint8_t buf[DPRAM_RX_SIZE]; - - 
memset(&msg, 0, sizeof(msg)); - /* test for lost msgs */ - lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]); - if (lost_msg) { - int j; - /* reset condition */ - iowrite8(0, &card->dpram[DPRAM_RX_LOST]); - /* prepare msg */ - msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; - msg.can_dlc = CAN_ERR_DLC; - msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - /* - * service to all busses, we don't know which it was applicable - * but only service busses that are online - */ - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - netdev = card->net[j]; - if (!netdev) - continue; - if (!canif_is_active(netdev)) - /* a dead bus has no overflows */ - continue; - ++netdev->stats.rx_over_errors; - softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); - } - /* prepare for other use */ - memset(&msg, 0, sizeof(msg)); - ++cnt; - } - - fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]); - fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]); - - if (++fifo_rd >= DPRAM_RX_CNT) - fifo_rd = 0; - if (fifo_wr == fifo_rd) - return cnt; - - memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd], - DPRAM_RX_SIZE); - mb(); - /* trigger dual port RAM */ - iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]); - - ptr = buf; - cmd = *ptr++; - if (cmd == 0xff) - /* not quite usefull, probably the card has got out */ - return 0; - netdev = card->net[0]; - if (cmd & CMD_BUS2) - netdev = card->net[1]; - priv = netdev_priv(netdev); - - if (cmd & CMD_ERR) { - uint8_t can_state, state; - - state = *ptr++; - - msg.can_id = CAN_ERR_FLAG; - msg.can_dlc = CAN_ERR_DLC; - - if (state & SF_MASK_BUSOFF) { - can_state = CAN_STATE_BUS_OFF; - msg.can_id |= CAN_ERR_BUSOFF; - state = STATE_BUSOFF; - } else if (state & SF_MASK_EPASSIVE) { - can_state = CAN_STATE_ERROR_PASSIVE; - msg.can_id |= CAN_ERR_CRTL; - msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE; - state = STATE_EPASSIVE; - } else { - can_state = CAN_STATE_ERROR_ACTIVE; - msg.can_id |= CAN_ERR_CRTL; - state = STATE_EACTIVE; - } - /* update DPRAM */ - iowrite8(state, &card->dpram[priv->index ? 
- DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]); - /* timestamp */ - tmp_u32 = le32_to_cpup((void *)ptr); - ptr += 4; - ktime = softing_raw2ktime(card, tmp_u32); - - ++netdev->stats.rx_errors; - /* update internal status */ - if (can_state != priv->can.state) { - priv->can.state = can_state; - if (can_state == CAN_STATE_ERROR_PASSIVE) - ++priv->can.can_stats.error_passive; - else if (can_state == CAN_STATE_BUS_OFF) { - /* this calls can_close_cleanup() */ - can_bus_off(netdev); - netif_stop_queue(netdev); - } - /* trigger socketcan */ - softing_netdev_rx(netdev, &msg, ktime); - } - - } else { - if (cmd & CMD_RTR) - msg.can_id |= CAN_RTR_FLAG; - msg.can_dlc = get_can_dlc(*ptr++); - if (cmd & CMD_XTD) { - msg.can_id |= CAN_EFF_FLAG; - msg.can_id |= le32_to_cpup((void *)ptr); - ptr += 4; - } else { - msg.can_id |= le16_to_cpup((void *)ptr); - ptr += 2; - } - /* timestamp */ - tmp_u32 = le32_to_cpup((void *)ptr); - ptr += 4; - ktime = softing_raw2ktime(card, tmp_u32); - if (!(msg.can_id & CAN_RTR_FLAG)) - memcpy(&msg.data[0], ptr, 8); - ptr += 8; - /* update socket */ - if (cmd & CMD_ACK) { - /* acknowledge, was tx msg */ - struct sk_buff *skb; - skb = priv->can.echo_skb[priv->tx.echo_get]; - if (skb) - skb->tstamp = ktime; - can_get_echo_skb(netdev, priv->tx.echo_get); - ++priv->tx.echo_get; - if (priv->tx.echo_get >= TX_ECHO_SKB_MAX) - priv->tx.echo_get = 0; - if (priv->tx.pending) - --priv->tx.pending; - if (card->tx.pending) - --card->tx.pending; - ++netdev->stats.tx_packets; - if (!(msg.can_id & CAN_RTR_FLAG)) - netdev->stats.tx_bytes += msg.can_dlc; - } else { - int ret; - - ret = softing_netdev_rx(netdev, &msg, ktime); - if (ret == NET_RX_SUCCESS) { - ++netdev->stats.rx_packets; - if (!(msg.can_id & CAN_RTR_FLAG)) - netdev->stats.rx_bytes += msg.can_dlc; - } else { - ++netdev->stats.rx_dropped; - } - } - } - ++cnt; - return cnt; -} - -/* - * real interrupt handler - */ -static irqreturn_t softing_irq_thread(int irq, void *dev_id) -{ - struct softing *card = (struct softing *)dev_id; - struct net_device *netdev; - struct softing_priv *priv; - int j, offset, work_done; - - work_done = 0; - spin_lock_bh(&card->spin); - while (softing_handle_1(card) > 0) { - ++card->irq.svc_count; - ++work_done; - } - spin_unlock_bh(&card->spin); - /* resume tx queue's */ - offset = card->tx.last_bus; - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - if (card->tx.pending >= TXMAX) - break; - netdev = card->net[(j + offset + 1) % card->pdat->nbus]; - if (!netdev) - continue; - priv = netdev_priv(netdev); - if (!canif_is_active(netdev)) - /* it makes no sense to wake dead busses */ - continue; - if (priv->tx.pending >= TX_ECHO_SKB_MAX) - continue; - ++work_done; - netif_wake_queue(netdev); - } - return work_done ? IRQ_HANDLED : IRQ_NONE; -} - -/* - * interrupt routines: - * schedule the 'real interrupt handler' - */ -static irqreturn_t softing_irq_v2(int irq, void *dev_id) -{ - struct softing *card = (struct softing *)dev_id; - uint8_t ir; - - ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]); - iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); - return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE; -} - -static irqreturn_t softing_irq_v1(int irq, void *dev_id) -{ - struct softing *card = (struct softing *)dev_id; - uint8_t ir; - - ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]); - iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]); - return ir ? 
IRQ_WAKE_THREAD : IRQ_NONE; -} - -/* - * netdev/candev inter-operability - */ -static int softing_netdev_open(struct net_device *ndev) -{ - int ret; - - /* check or determine and set bittime */ - ret = open_candev(ndev); - if (!ret) - ret = softing_startstop(ndev, 1); - return ret; -} - -static int softing_netdev_stop(struct net_device *ndev) -{ - int ret; - - netif_stop_queue(ndev); - - /* softing cycle does close_candev() */ - ret = softing_startstop(ndev, 0); - return ret; -} - -static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode) -{ - int ret; - - switch (mode) { - case CAN_MODE_START: - /* softing_startstop does close_candev() */ - ret = softing_startstop(ndev, 1); - return ret; - case CAN_MODE_STOP: - case CAN_MODE_SLEEP: - return -EOPNOTSUPP; - } - return 0; -} - -/* - * Softing device management helpers - */ -int softing_enable_irq(struct softing *card, int enable) -{ - int ret; - - if (!card->irq.nr) { - return 0; - } else if (card->irq.requested && !enable) { - free_irq(card->irq.nr, card); - card->irq.requested = 0; - } else if (!card->irq.requested && enable) { - ret = request_threaded_irq(card->irq.nr, - (card->pdat->generation >= 2) ? - softing_irq_v2 : softing_irq_v1, - softing_irq_thread, IRQF_SHARED, - dev_name(&card->pdev->dev), card); - if (ret) { - dev_alert(&card->pdev->dev, - "request_threaded_irq(%u) failed\n", - card->irq.nr); - return ret; - } - card->irq.requested = 1; - } - return 0; -} - -static void softing_card_shutdown(struct softing *card) -{ - int fw_up = 0; - - if (mutex_lock_interruptible(&card->fw.lock)) - /* return -ERESTARTSYS */; - fw_up = card->fw.up; - card->fw.up = 0; - - if (card->irq.requested && card->irq.nr) { - free_irq(card->irq.nr, card); - card->irq.requested = 0; - } - if (fw_up) { - if (card->pdat->enable_irq) - card->pdat->enable_irq(card->pdev, 0); - softing_set_reset_dpram(card); - if (card->pdat->reset) - card->pdat->reset(card->pdev, 1); - } - mutex_unlock(&card->fw.lock); -} - -static __devinit int softing_card_boot(struct softing *card) -{ - int ret, j; - static const uint8_t stream[] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }; - unsigned char back[sizeof(stream)]; - - if (mutex_lock_interruptible(&card->fw.lock)) - return -ERESTARTSYS; - if (card->fw.up) { - mutex_unlock(&card->fw.lock); - return 0; - } - /* reset board */ - if (card->pdat->enable_irq) - card->pdat->enable_irq(card->pdev, 1); - /* boot card */ - softing_set_reset_dpram(card); - if (card->pdat->reset) - card->pdat->reset(card->pdev, 1); - for (j = 0; (j + sizeof(stream)) < card->dpram_size; - j += sizeof(stream)) { - - memcpy_toio(&card->dpram[j], stream, sizeof(stream)); - /* flush IO cache */ - mb(); - memcpy_fromio(back, &card->dpram[j], sizeof(stream)); - - if (!memcmp(back, stream, sizeof(stream))) - continue; - /* memory is not equal */ - dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j); - ret = -EIO; - goto failed; - } - wmb(); - /* load boot firmware */ - ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram, - card->dpram_size, - card->pdat->boot.offs - card->pdat->boot.addr); - if (ret < 0) - goto failed; - /* load loader firmware */ - ret = softing_load_fw(card->pdat->load.fw, card, card->dpram, - card->dpram_size, - card->pdat->load.offs - card->pdat->load.addr); - if (ret < 0) - goto failed; - - if (card->pdat->reset) - card->pdat->reset(card->pdev, 0); - softing_clr_reset_dpram(card); - ret = softing_bootloader_command(card, 0, "card boot"); - if (ret < 0) - goto failed; - ret = 
softing_load_app_fw(card->pdat->app.fw, card); - if (ret < 0) - goto failed; - - ret = softing_chip_poweron(card); - if (ret < 0) - goto failed; - - card->fw.up = 1; - mutex_unlock(&card->fw.lock); - return 0; -failed: - card->fw.up = 0; - if (card->pdat->enable_irq) - card->pdat->enable_irq(card->pdev, 0); - softing_set_reset_dpram(card); - if (card->pdat->reset) - card->pdat->reset(card->pdev, 1); - mutex_unlock(&card->fw.lock); - return ret; -} - -/* - * netdev sysfs - */ -static ssize_t show_channel(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct net_device *ndev = to_net_dev(dev); - struct softing_priv *priv = netdev2softing(ndev); - - return sprintf(buf, "%i\n", priv->index); -} - -static ssize_t show_chip(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct net_device *ndev = to_net_dev(dev); - struct softing_priv *priv = netdev2softing(ndev); - - return sprintf(buf, "%i\n", priv->chip); -} - -static ssize_t show_output(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct net_device *ndev = to_net_dev(dev); - struct softing_priv *priv = netdev2softing(ndev); - - return sprintf(buf, "0x%02x\n", priv->output); -} - -static ssize_t store_output(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct net_device *ndev = to_net_dev(dev); - struct softing_priv *priv = netdev2softing(ndev); - struct softing *card = priv->card; - unsigned long val; - int ret; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - val &= 0xFF; - - ret = mutex_lock_interruptible(&card->fw.lock); - if (ret) - return -ERESTARTSYS; - if (netif_running(ndev)) { - mutex_unlock(&card->fw.lock); - return -EBUSY; - } - priv->output = val; - mutex_unlock(&card->fw.lock); - return count; -} - -static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); -static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL); -static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output); - -static const struct attribute *const netdev_sysfs_attrs[] = { - &dev_attr_channel.attr, - &dev_attr_chip.attr, - &dev_attr_output.attr, - NULL, -}; -static const struct attribute_group netdev_sysfs_group = { - .name = NULL, - .attrs = (struct attribute **)netdev_sysfs_attrs, -}; - -static const struct net_device_ops softing_netdev_ops = { - .ndo_open = softing_netdev_open, - .ndo_stop = softing_netdev_stop, - .ndo_start_xmit = softing_netdev_start_xmit, -}; - -static const struct can_bittiming_const softing_btr_const = { - .tseg1_min = 1, - .tseg1_max = 16, - .tseg2_min = 1, - .tseg2_max = 8, - .sjw_max = 4, /* overruled */ - .brp_min = 1, - .brp_max = 32, /* overruled */ - .brp_inc = 1, -}; - - -static __devinit struct net_device *softing_netdev_create(struct softing *card, - uint16_t chip_id) -{ - struct net_device *netdev; - struct softing_priv *priv; - - netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX); - if (!netdev) { - dev_alert(&card->pdev->dev, "alloc_candev failed\n"); - return NULL; - } - priv = netdev_priv(netdev); - priv->netdev = netdev; - priv->card = card; - memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const)); - priv->btr_const.brp_max = card->pdat->max_brp; - priv->btr_const.sjw_max = card->pdat->max_sjw; - priv->can.bittiming_const = &priv->btr_const; - priv->can.clock.freq = 8000000; - priv->chip = chip_id; - priv->output = softing_default_output(netdev); - SET_NETDEV_DEV(netdev, &card->pdev->dev); - - netdev->flags |= IFF_ECHO; - netdev->netdev_ops = 
&softing_netdev_ops; - priv->can.do_set_mode = softing_candev_set_mode; - priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; - - return netdev; -} - -static __devinit int softing_netdev_register(struct net_device *netdev) -{ - int ret; - - netdev->sysfs_groups[0] = &netdev_sysfs_group; - ret = register_candev(netdev); - if (ret) { - dev_alert(&netdev->dev, "register failed\n"); - return ret; - } - return 0; -} - -static void softing_netdev_cleanup(struct net_device *netdev) -{ - unregister_candev(netdev); - free_candev(netdev); -} - -/* - * sysfs for Platform device - */ -#define DEV_ATTR_RO(name, member) \ -static ssize_t show_##name(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ - return sprintf(buf, "%u\n", card->member); \ -} \ -static DEVICE_ATTR(name, 0444, show_##name, NULL) - -#define DEV_ATTR_RO_STR(name, member) \ -static ssize_t show_##name(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ - return sprintf(buf, "%s\n", card->member); \ -} \ -static DEVICE_ATTR(name, 0444, show_##name, NULL) - -DEV_ATTR_RO(serial, id.serial); -DEV_ATTR_RO_STR(firmware, pdat->app.fw); -DEV_ATTR_RO(firmware_version, id.fw_version); -DEV_ATTR_RO_STR(hardware, pdat->name); -DEV_ATTR_RO(hardware_version, id.hw_version); -DEV_ATTR_RO(license, id.license); -DEV_ATTR_RO(frequency, id.freq); -DEV_ATTR_RO(txpending, tx.pending); - -static struct attribute *softing_pdev_attrs[] = { - &dev_attr_serial.attr, - &dev_attr_firmware.attr, - &dev_attr_firmware_version.attr, - &dev_attr_hardware.attr, - &dev_attr_hardware_version.attr, - &dev_attr_license.attr, - &dev_attr_frequency.attr, - &dev_attr_txpending.attr, - NULL, -}; - -static const struct attribute_group softing_pdev_group = { - .name = NULL, - .attrs = softing_pdev_attrs, -}; - -/* - * platform driver - */ -static __devexit int softing_pdev_remove(struct platform_device *pdev) -{ - struct softing *card = platform_get_drvdata(pdev); - int j; - - /* first, disable card*/ - softing_card_shutdown(card); - - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - if (!card->net[j]) - continue; - softing_netdev_cleanup(card->net[j]); - card->net[j] = NULL; - } - sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group); - - iounmap(card->dpram); - kfree(card); - return 0; -} - -static __devinit int softing_pdev_probe(struct platform_device *pdev) -{ - const struct softing_platform_data *pdat = pdev->dev.platform_data; - struct softing *card; - struct net_device *netdev; - struct softing_priv *priv; - struct resource *pres; - int ret; - int j; - - if (!pdat) { - dev_warn(&pdev->dev, "no platform data\n"); - return -EINVAL; - } - if (pdat->nbus > ARRAY_SIZE(card->net)) { - dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus); - return -EINVAL; - } - - card = kzalloc(sizeof(*card), GFP_KERNEL); - if (!card) - return -ENOMEM; - card->pdat = pdat; - card->pdev = pdev; - platform_set_drvdata(pdev, card); - mutex_init(&card->fw.lock); - spin_lock_init(&card->spin); - - ret = -EINVAL; - pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!pres) - goto platform_resource_failed;; - card->dpram_phys = pres->start; - card->dpram_size = pres->end - pres->start + 1; - card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size); - if (!card->dpram) { - dev_alert(&card->pdev->dev, "dpram ioremap failed\n"); - goto ioremap_failed; - } - - pres = platform_get_resource(pdev, 
IORESOURCE_IRQ, 0); - if (pres) - card->irq.nr = pres->start; - - /* reset card */ - ret = softing_card_boot(card); - if (ret < 0) { - dev_alert(&pdev->dev, "failed to boot\n"); - goto boot_failed; - } - - /* only now, the chip's are known */ - card->id.freq = card->pdat->freq; - - ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group); - if (ret < 0) { - dev_alert(&card->pdev->dev, "sysfs failed\n"); - goto sysfs_failed; - } - - ret = -ENOMEM; - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - card->net[j] = netdev = - softing_netdev_create(card, card->id.chip[j]); - if (!netdev) { - dev_alert(&pdev->dev, "failed to make can[%i]", j); - goto netdev_failed; - } - priv = netdev_priv(card->net[j]); - priv->index = j; - ret = softing_netdev_register(netdev); - if (ret) { - free_candev(netdev); - card->net[j] = NULL; - dev_alert(&card->pdev->dev, - "failed to register can[%i]\n", j); - goto netdev_failed; - } - } - dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name); - return 0; - -netdev_failed: - for (j = 0; j < ARRAY_SIZE(card->net); ++j) { - if (!card->net[j]) - continue; - softing_netdev_cleanup(card->net[j]); - } - sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group); -sysfs_failed: - softing_card_shutdown(card); -boot_failed: - iounmap(card->dpram); -ioremap_failed: -platform_resource_failed: - kfree(card); - return ret; -} - -static struct platform_driver softing_driver = { - .driver = { - .name = "softing", - .owner = THIS_MODULE, - }, - .probe = softing_pdev_probe, - .remove = __devexit_p(softing_pdev_remove), -}; - -MODULE_ALIAS("platform:softing"); - -static int __init softing_start(void) -{ - return platform_driver_register(&softing_driver); -} - -static void __exit softing_stop(void) -{ - platform_driver_unregister(&softing_driver); -} - -module_init(softing_start); -module_exit(softing_stop); - -MODULE_DESCRIPTION("Softing DPRAM CAN driver"); -MODULE_AUTHOR("Kurt Van Dijck "); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/can/softing/softing_platform.h b/trunk/drivers/net/can/softing/softing_platform.h deleted file mode 100644 index ebbf69815623..000000000000 --- a/trunk/drivers/net/can/softing/softing_platform.h +++ /dev/null @@ -1,40 +0,0 @@ - -#include - -#ifndef _SOFTING_DEVICE_H_ -#define _SOFTING_DEVICE_H_ - -/* softing firmware directory prefix */ -#define fw_dir "softing-4.6/" - -struct softing_platform_data { - unsigned int manf; - unsigned int prod; - /* - * generation - * 1st with NEC or SJA1000 - * 8bit, exclusive interrupt, ... 
- * 2nd only SJA1000 - * 16bit, shared interrupt - */ - int generation; - int nbus; /* # busses on device */ - unsigned int freq; /* operating frequency in Hz */ - unsigned int max_brp; - unsigned int max_sjw; - unsigned long dpram_size; - const char *name; - struct { - unsigned long offs; - unsigned long addr; - const char *fw; - } boot, load, app; - /* - * reset() function - * bring pdev in or out of reset, depending on value - */ - int (*reset)(struct platform_device *pdev, int value); - int (*enable_irq)(struct platform_device *pdev, int value); -}; - -#endif diff --git a/trunk/drivers/net/cnic.c b/trunk/drivers/net/cnic.c index c82049635139..263a2944566f 100644 --- a/trunk/drivers/net/cnic.c +++ b/trunk/drivers/net/cnic.c @@ -65,14 +65,7 @@ static LIST_HEAD(cnic_udev_list); static DEFINE_RWLOCK(cnic_dev_lock); static DEFINE_MUTEX(cnic_lock); -static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; - -/* helper function, assuming cnic_lock is held */ -static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type) -{ - return rcu_dereference_protected(cnic_ulp_tbl[type], - lockdep_is_held(&cnic_lock)); -} +static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; static int cnic_service_bnx2(void *, void *); static int cnic_service_bnx2x(void *, void *); @@ -442,7 +435,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) return -EINVAL; } mutex_lock(&cnic_lock); - if (cnic_ulp_tbl_prot(ulp_type)) { + if (cnic_ulp_tbl[ulp_type]) { pr_err("%s: Type %d has already been registered\n", __func__, ulp_type); mutex_unlock(&cnic_lock); @@ -485,7 +478,7 @@ int cnic_unregister_driver(int ulp_type) return -EINVAL; } mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(ulp_type); + ulp_ops = cnic_ulp_tbl[ulp_type]; if (!ulp_ops) { pr_err("%s: Type %d has not been registered\n", __func__, ulp_type); @@ -536,7 +529,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, return -EINVAL; } mutex_lock(&cnic_lock); - if (cnic_ulp_tbl_prot(ulp_type) == NULL) { + if (cnic_ulp_tbl[ulp_type] == NULL) { pr_err("%s: Driver with type %d has not been registered\n", __func__, ulp_type); mutex_unlock(&cnic_lock); @@ -551,7 +544,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); cp->ulp_handle[ulp_type] = ulp_ctx; - ulp_ops = cnic_ulp_tbl_prot(ulp_type); + ulp_ops = cnic_ulp_tbl[ulp_type]; rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); cnic_hold(dev); @@ -706,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) { int i; - __le32 *page_table = (__le32 *) dma->pgtbl; + u32 *page_table = dma->pgtbl; for (i = 0; i < dma->num_pages; i++) { /* Each entry needs to be in big endian format. */ - *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); + *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); page_table++; - *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); + *page_table = (u32) dma->pg_map_arr[i]; page_table++; } } @@ -720,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) { int i; - __le32 *page_table = (__le32 *) dma->pgtbl; + u32 *page_table = dma->pgtbl; for (i = 0; i < dma->num_pages; i++) { /* Each entry needs to be in little endian format. 
*/ - *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); + *page_table = dma->pg_map_arr[i] & 0xffffffff; page_table++; - *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); + *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); page_table++; } } @@ -2960,8 +2953,7 @@ static void cnic_ulp_stop(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], - lockdep_is_held(&cnic_lock)); + ulp_ops = cp->ulp_ops[if_type]; if (!ulp_ops) { mutex_unlock(&cnic_lock); continue; @@ -2985,8 +2977,7 @@ static void cnic_ulp_start(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], - lockdep_is_held(&cnic_lock)); + ulp_ops = cp->ulp_ops[if_type]; if (!ulp_ops || !ulp_ops->cnic_start) { mutex_unlock(&cnic_lock); continue; @@ -3050,7 +3041,7 @@ static void cnic_ulp_init(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(i); + ulp_ops = cnic_ulp_tbl[i]; if (!ulp_ops || !ulp_ops->cnic_init) { mutex_unlock(&cnic_lock); continue; @@ -3074,7 +3065,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(i); + ulp_ops = cnic_ulp_tbl[i]; if (!ulp_ops || !ulp_ops->cnic_exit) { mutex_unlock(&cnic_lock); continue; diff --git a/trunk/drivers/net/cnic.h b/trunk/drivers/net/cnic.h index 4456260c653c..b328f6c924c3 100644 --- a/trunk/drivers/net/cnic.h +++ b/trunk/drivers/net/cnic.h @@ -220,7 +220,7 @@ struct cnic_local { #define ULP_F_INIT 0 #define ULP_F_START 1 #define ULP_F_CALL_PENDING 2 - struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE]; + struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; unsigned long cnic_local_flags; #define CNIC_LCL_FL_KWQ_INIT 0x0 diff --git a/trunk/drivers/net/cxgb4/cxgb4_main.c b/trunk/drivers/net/cxgb4/cxgb4_main.c index ec35d458102c..059c1eec8c3f 100644 --- a/trunk/drivers/net/cxgb4/cxgb4_main.c +++ b/trunk/drivers/net/cxgb4/cxgb4_main.c @@ -2710,8 +2710,6 @@ static int cxgb_open(struct net_device *dev) struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - netif_carrier_off(dev); - if (!(adapter->flags & FULL_INIT_DONE)) { err = cxgb_up(adapter); if (err < 0) @@ -3663,6 +3661,7 @@ static int __devinit init_one(struct pci_dev *pdev, pi->xact_addr_filt = -1; pi->rx_offload = RX_CSO; pi->port_id = i; + netif_carrier_off(netdev); netdev->irq = pdev->irq; netdev->features |= NETIF_F_SG | TSO_FLAGS; diff --git a/trunk/drivers/net/dl2k.c b/trunk/drivers/net/dl2k.c index c05db6046050..e1a8216ff692 100644 --- a/trunk/drivers/net/dl2k.c +++ b/trunk/drivers/net/dl2k.c @@ -1753,6 +1753,8 @@ rio_close (struct net_device *dev) /* Free all the skbuffs in the queue. 
*/ for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].status = 0; + np->rx_ring[i].fraginfo = 0; skb = np->rx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, @@ -1761,8 +1763,6 @@ rio_close (struct net_device *dev) dev_kfree_skb (skb); np->rx_skbuff[i] = NULL; } - np->rx_ring[i].status = 0; - np->rx_ring[i].fraginfo = 0; } for (i = 0; i < TX_RING_SIZE; i++) { skb = np->tx_skbuff[i]; diff --git a/trunk/drivers/net/e1000e/e1000.h b/trunk/drivers/net/e1000e/e1000.h index 00bf595ebd67..e610e1369053 100644 --- a/trunk/drivers/net/e1000e/e1000.h +++ b/trunk/drivers/net/e1000e/e1000.h @@ -364,7 +364,6 @@ struct e1000_adapter { /* structs defined in e1000_hw.h */ struct e1000_hw hw; - spinlock_t stats64_lock; struct e1000_hw_stats stats; struct e1000_phy_info phy_info; struct e1000_phy_stats phy_stats; @@ -495,9 +494,7 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); -extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 - *stats); +extern void e1000e_update_stats(struct e1000_adapter *adapter); extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_get_hw_control(struct e1000_adapter *adapter); diff --git a/trunk/drivers/net/e1000e/ethtool.c b/trunk/drivers/net/e1000e/ethtool.c index daa7fe4b9fdd..fa08b6336cfb 100644 --- a/trunk/drivers/net/e1000e/ethtool.c +++ b/trunk/drivers/net/e1000e/ethtool.c @@ -46,15 +46,15 @@ struct e1000_stats { }; #define E1000_STAT(str, m) { \ - .stat_string = str, \ - .type = E1000_STATS, \ - .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ - .stat_offset = offsetof(struct e1000_adapter, m) } + .stat_string = str, \ + .type = E1000_STATS, \ + .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ + .stat_offset = offsetof(struct e1000_adapter, m) } #define E1000_NETDEV_STAT(str, m) { \ - .stat_string = str, \ - .type = NETDEV_STATS, \ - .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ - .stat_offset = offsetof(struct rtnl_link_stats64, m) } + .stat_string = str, \ + .type = NETDEV_STATS, \ + .sizeof_stat = sizeof(((struct net_device *)0)->m), \ + .stat_offset = offsetof(struct net_device, m) } static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("rx_packets", stats.gprc), @@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("tx_broadcast", stats.bptc), E1000_STAT("rx_multicast", stats.mprc), E1000_STAT("tx_multicast", stats.mptc), - E1000_NETDEV_STAT("rx_errors", rx_errors), - E1000_NETDEV_STAT("tx_errors", tx_errors), - E1000_NETDEV_STAT("tx_dropped", tx_dropped), + E1000_NETDEV_STAT("rx_errors", stats.rx_errors), + E1000_NETDEV_STAT("tx_errors", stats.tx_errors), + E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped), E1000_STAT("multicast", stats.mprc), E1000_STAT("collisions", stats.colc), - E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), - E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), + E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors), + E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors), E1000_STAT("rx_crc_errors", stats.crcerrs), - E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), + E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors), 
E1000_STAT("rx_no_buffer_count", stats.rnbc), E1000_STAT("rx_missed_errors", stats.mpc), E1000_STAT("tx_aborted_errors", stats.ecol), E1000_STAT("tx_carrier_errors", stats.tncrs), - E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), - E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), + E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors), + E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors), E1000_STAT("tx_window_errors", stats.latecol), E1000_STAT("tx_abort_late_coll", stats.latecol), E1000_STAT("tx_deferred_ok", stats.dc), @@ -684,13 +684,20 @@ static int e1000_set_ringparam(struct net_device *netdev, rx_old = adapter->rx_ring; err = -ENOMEM; - tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); + tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); if (!tx_ring) goto err_alloc_tx; + /* + * use a memcpy to save any previously configured + * items like napi structs from having to be + * reinitialized + */ + memcpy(tx_ring, tx_old, sizeof(struct e1000_ring)); - rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); + rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); if (!rx_ring) goto err_alloc_rx; + memcpy(rx_ring, rx_old, sizeof(struct e1000_ring)); adapter->tx_ring = tx_ring; adapter->rx_ring = rx_ring; @@ -1248,6 +1255,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; + u32 stat_reg = 0; u16 phy_reg = 0; s32 ret_val = 0; @@ -1355,7 +1363,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) * Set the ILOS bit on the fiber Nic if half duplex link is * detected. */ - if ((er32(STATUS) & E1000_STATUS_FD) == 0) + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); } @@ -1973,15 +1982,14 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, u64 *data) { struct e1000_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 net_stats; int i; char *p = NULL; - e1000e_get_stats64(netdev, &net_stats); + e1000e_update_stats(adapter); for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: - p = (char *) &net_stats + + p = (char *) netdev + e1000_gstrings_stats[i].stat_offset; break; case E1000_STATS: diff --git a/trunk/drivers/net/e1000e/ich8lan.c b/trunk/drivers/net/e1000e/ich8lan.c index 232b42b7f7ce..fb46974cfec1 100644 --- a/trunk/drivers/net/e1000e/ich8lan.c +++ b/trunk/drivers/net/e1000e/ich8lan.c @@ -2104,6 +2104,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) { union ich8_hws_flash_status hsfsts; s32 ret_val = -E1000_ERR_NVM; + s32 i = 0; hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); @@ -2139,8 +2140,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { - s32 i = 0; - /* * Otherwise poll for sometime so the current * cycle has a chance to end before giving up. 
diff --git a/trunk/drivers/net/e1000e/lib.c b/trunk/drivers/net/e1000e/lib.c index 96921de5df2e..68aa1749bf66 100644 --- a/trunk/drivers/net/e1000e/lib.c +++ b/trunk/drivers/net/e1000e/lib.c @@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); + u16 timeout = 0; u8 spi_stat_reg; if (nvm->type == e1000_nvm_eeprom_spi) { - u16 timeout = NVM_MAX_RETRY_SPI; - /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); ew32(EECD, eecd); udelay(1); + timeout = NVM_MAX_RETRY_SPI; /* * Read "Status Register" repeatedly until the LSB is cleared. diff --git a/trunk/drivers/net/e1000e/netdev.c b/trunk/drivers/net/e1000e/netdev.c index 5b916b01805f..1c18f26b0812 100644 --- a/trunk/drivers/net/e1000e/netdev.c +++ b/trunk/drivers/net/e1000e/netdev.c @@ -900,6 +900,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; return cleaned; } @@ -1055,6 +1057,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) } adapter->total_tx_bytes += total_tx_bytes; adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; return count < tx_ring->count; } @@ -1241,6 +1245,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; return cleaned; } @@ -1420,6 +1426,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; return cleaned; } @@ -2720,6 +2728,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl, rfctl; + u32 psrctl = 0; u32 pages = 0; /* Workaround Si errata on 82579 - configure jumbo frame flow */ @@ -2818,8 +2827,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) adapter->rx_ps_pages = 0; if (adapter->rx_ps_pages) { - u32 psrctl = 0; - /* Configure extra packet-split registers */ rfctl = er32(RFCTL); rfctl |= E1000_RFCTL_EXTEN; @@ -3021,6 +3028,7 @@ static void e1000_set_multi(struct net_device *netdev) struct netdev_hw_addr *ha; u8 *mta_list; u32 rctl; + int i; /* Check for Promiscuous and All Multicast modes */ @@ -3043,13 +3051,12 @@ static void e1000_set_multi(struct net_device *netdev) ew32(RCTL, rctl); if (!netdev_mc_empty(netdev)) { - int i = 0; - mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); if (!mta_list) return; /* prepare a packed array of only addresses. 
*/ + i = 0; netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); @@ -3331,8 +3338,6 @@ int e1000e_up(struct e1000_adapter *adapter) return 0; } -static void e1000e_update_stats(struct e1000_adapter *adapter); - void e1000e_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3367,11 +3372,6 @@ void e1000e_down(struct e1000_adapter *adapter) del_timer_sync(&adapter->phy_info_timer); netif_carrier_off(netdev); - - spin_lock(&adapter->stats64_lock); - e1000e_update_stats(adapter); - spin_unlock(&adapter->stats64_lock); - adapter->link_speed = 0; adapter->link_duplex = 0; @@ -3413,8 +3413,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - spin_lock_init(&adapter->stats64_lock); - e1000e_set_interrupt_capability(adapter); if (e1000_alloc_queues(adapter)) @@ -3888,7 +3886,7 @@ static void e1000e_update_phy_stats(struct e1000_adapter *adapter) * e1000e_update_stats - Update the board statistics counters * @adapter: board private structure **/ -static void e1000e_update_stats(struct e1000_adapter *adapter) +void e1000e_update_stats(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; @@ -4000,11 +3998,10 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_phy_regs *phy = &adapter->phy_regs; + int ret_val; if ((er32(STATUS) & E1000_STATUS_LU) && (adapter->hw.phy.media_type == e1000_media_type_copper)) { - int ret_val; - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); @@ -4150,6 +4147,7 @@ static void e1000_watchdog_task(struct work_struct *work) struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_hw *hw = &adapter->hw; u32 link, tctl; + int tx_pending = 0; link = e1000e_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) { @@ -4287,9 +4285,7 @@ static void e1000_watchdog_task(struct work_struct *work) } link_up: - spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); - spin_unlock(&adapter->stats64_lock); mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; adapter->tpt_old = adapter->stats.tpt; @@ -4303,18 +4299,21 @@ static void e1000_watchdog_task(struct work_struct *work) e1000e_update_adaptive(&adapter->hw); - if (!netif_carrier_ok(netdev) && - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { - /* - * We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). - */ - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); - /* return immediately since reset is imminent */ - return; + if (!netif_carrier_ok(netdev)) { + tx_pending = (e1000_desc_unused(tx_ring) + 1 < + tx_ring->count); + if (tx_pending) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } } /* Simple mode for Interrupt Throttle Rate (ITR) */ @@ -4385,13 +4384,13 @@ static int e1000_tso(struct e1000_adapter *adapter, u32 cmd_length = 0; u16 ipcse = 0, tucse, mss; u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; if (!skb_is_gso(skb)) return 0; if (skb_header_cloned(skb)) { - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } @@ -4898,55 +4897,16 @@ static void e1000_reset_task(struct work_struct *work) } /** - * e1000_get_stats64 - Get System Network Statistics + * e1000_get_stats - Get System Network Statistics * @netdev: network interface device structure - * @stats: rtnl_link_stats64 pointer * * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. **/ -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static struct net_device_stats *e1000_get_stats(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev_priv(netdev); - - memset(stats, 0, sizeof(struct rtnl_link_stats64)); - spin_lock(&adapter->stats64_lock); - e1000e_update_stats(adapter); - /* Fill out the OS statistics structure */ - stats->rx_bytes = adapter->stats.gorc; - stats->rx_packets = adapter->stats.gprc; - stats->tx_bytes = adapter->stats.gotc; - stats->tx_packets = adapter->stats.gptc; - stats->multicast = adapter->stats.mprc; - stats->collisions = adapter->stats.colc; - - /* Rx Errors */ - - /* - * RLEC on some newer hardware can be incorrect so build - * our own version based on RUC and ROC - */ - stats->rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; - stats->rx_crc_errors = adapter->stats.crcerrs; - stats->rx_frame_errors = adapter->stats.algnerrc; - stats->rx_missed_errors = adapter->stats.mpc; - - /* Tx Errors */ - stats->tx_errors = adapter->stats.ecol + - adapter->stats.latecol; - stats->tx_aborted_errors = adapter->stats.ecol; - stats->tx_window_errors = adapter->stats.latecol; - stats->tx_carrier_errors = adapter->stats.tncrs; - - /* Tx Dropped needs to be maintained elsewhere */ - - spin_unlock(&adapter->stats64_lock); - return stats; + /* only return the current stats */ + return &netdev->stats; } /** @@ -5516,10 +5476,9 @@ static irqreturn_t e1000_intr_msix(int irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); + int vector, msix_irq; if (adapter->msix_entries) { - int vector, msix_irq; - vector = 0; msix_irq = adapter->msix_entries[vector].vector; disable_irq(msix_irq); @@ -5716,7 +5675,7 @@ static const struct net_device_ops e1000e_netdev_ops = { .ndo_open = e1000_open, .ndo_stop = e1000_close, .ndo_start_xmit = e1000_xmit_frame, - .ndo_get_stats64 = e1000e_get_stats64, + .ndo_get_stats = e1000_get_stats, .ndo_set_multicast_list = e1000_set_multi, .ndo_set_mac_address = e1000_set_mac, .ndo_change_mtu = e1000_change_mtu, diff --git a/trunk/drivers/net/e1000e/phy.c b/trunk/drivers/net/e1000e/phy.c index 6ae31fcfb629..6bea051b134b 100644 --- a/trunk/drivers/net/e1000e/phy.c +++ b/trunk/drivers/net/e1000e/phy.c @@ -2409,7 +2409,9 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) s32 
e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; + u32 page_select = 0; u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -2425,8 +2427,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { - u32 page_shift, page_select; - /* * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for @@ -2468,7 +2468,9 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; + u32 page_select = 0; u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -2484,8 +2486,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { - u32 page_shift, page_select; - /* * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for diff --git a/trunk/drivers/net/enic/enic.h b/trunk/drivers/net/enic/enic.h index ca3be4f15556..a937f49d9db7 100644 --- a/trunk/drivers/net/enic/enic.h +++ b/trunk/drivers/net/enic/enic.h @@ -32,8 +32,8 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.2" -#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" +#define DRV_VERSION "1.4.1.10" +#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 @@ -49,7 +49,7 @@ struct enic_msix_entry { void *devid; }; -#define ENIC_PORT_REQUEST_APPLIED (1 << 0) +#define ENIC_SET_APPLIED (1 << 0) #define ENIC_SET_REQUEST (1 << 1) #define ENIC_SET_NAME (1 << 2) #define ENIC_SET_INSTANCE (1 << 3) diff --git a/trunk/drivers/net/enic/enic_main.c b/trunk/drivers/net/enic/enic_main.c index 89664c670972..a0af48c51fb3 100644 --- a/trunk/drivers/net/enic/enic_main.c +++ b/trunk/drivers/net/enic/enic_main.c @@ -1318,20 +1318,18 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac) vic_provinfo_free(vp); if (err) return err; + + enic->pp.set |= ENIC_SET_APPLIED; break; case PORT_REQUEST_DISASSOCIATE: + enic->pp.set &= ~ENIC_SET_APPLIED; break; default: return -EINVAL; } - /* Set flag to indicate that the port assoc/disassoc - * request has been sent out to fw - */ - enic->pp.set |= ENIC_PORT_REQUEST_APPLIED; - return 0; } @@ -1413,7 +1411,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf, int err, error, done; u16 response = PORT_PROFILE_RESPONSE_SUCCESS; - if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED)) + if (!(enic->pp.set & ENIC_SET_APPLIED)) return -ENODATA; err = enic_dev_init_done(enic, &done, &error); diff --git a/trunk/drivers/net/hamradio/bpqether.c b/trunk/drivers/net/hamradio/bpqether.c index 8931168d3e74..ac1d323c5eb5 100644 --- a/trunk/drivers/net/hamradio/bpqether.c +++ b/trunk/drivers/net/hamradio/bpqether.c @@ -400,14 +400,13 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos) static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct list_head *p; - struct bpqdev *bpqdev = v; ++*pos; if (v == SEQ_START_TOKEN) - p = rcu_dereference(list_next_rcu(&bpq_devices)); + p = rcu_dereference(bpq_devices.next); else - p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list)); + p = rcu_dereference(((struct bpqdev 
*)v)->bpq_list.next); return (p == &bpq_devices) ? NULL : list_entry(p, struct bpqdev, bpq_list); diff --git a/trunk/drivers/net/igb/e1000_82575.c b/trunk/drivers/net/igb/e1000_82575.c index c1552b6f4a68..0a2368fa6bc6 100644 --- a/trunk/drivers/net/igb/e1000_82575.c +++ b/trunk/drivers/net/igb/e1000_82575.c @@ -129,7 +129,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; case E1000_DEV_ID_82580_COPPER: case E1000_DEV_ID_82580_FIBER: - case E1000_DEV_ID_82580_QUAD_FIBER: case E1000_DEV_ID_82580_SERDES: case E1000_DEV_ID_82580_SGMII: case E1000_DEV_ID_82580_COPPER_DUAL: diff --git a/trunk/drivers/net/igb/e1000_hw.h b/trunk/drivers/net/igb/e1000_hw.h index 281324e85980..e2638afb8cdc 100644 --- a/trunk/drivers/net/igb/e1000_hw.h +++ b/trunk/drivers/net/igb/e1000_hw.h @@ -54,7 +54,6 @@ struct e1000_hw; #define E1000_DEV_ID_82580_SERDES 0x1510 #define E1000_DEV_ID_82580_SGMII 0x1511 #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 -#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 #define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 #define E1000_DEV_ID_DH89XXCC_SERDES 0x043A #define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C diff --git a/trunk/drivers/net/igb/igb_main.c b/trunk/drivers/net/igb/igb_main.c index 200cc3209672..58c665b7513d 100644 --- a/trunk/drivers/net/igb/igb_main.c +++ b/trunk/drivers/net/igb/igb_main.c @@ -68,7 +68,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, diff --git a/trunk/drivers/net/macvtap.c b/trunk/drivers/net/macvtap.c index 2300e4599520..5933621ac3ff 100644 --- a/trunk/drivers/net/macvtap.c +++ b/trunk/drivers/net/macvtap.c @@ -39,7 +39,7 @@ struct macvtap_queue { struct socket sock; struct socket_wq wq; int vnet_hdr_sz; - struct macvlan_dev __rcu *vlan; + struct macvlan_dev *vlan; struct file *file; unsigned int flags; }; @@ -141,8 +141,7 @@ static void macvtap_put_queue(struct macvtap_queue *q) struct macvlan_dev *vlan; spin_lock(&macvtap_lock); - vlan = rcu_dereference_protected(q->vlan, - lockdep_is_held(&macvtap_lock)); + vlan = rcu_dereference(q->vlan); if (vlan) { int index = get_slot(vlan, q); @@ -220,8 +219,7 @@ static void macvtap_del_queues(struct net_device *dev) /* macvtap_put_queue can free some slots, so go through all slots */ spin_lock(&macvtap_lock); for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) { - q = rcu_dereference_protected(vlan->taps[i], - lockdep_is_held(&macvtap_lock)); + q = rcu_dereference(vlan->taps[i]); if (q) { qlist[j++] = q; rcu_assign_pointer(vlan->taps[i], NULL); @@ -571,7 +569,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, } rcu_read_lock_bh(); - vlan = rcu_dereference_bh(q->vlan); + vlan = rcu_dereference(q->vlan); if (vlan) macvlan_start_xmit(skb, vlan->dev); else @@ -585,7 +583,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, err: rcu_read_lock_bh(); - vlan = rcu_dereference_bh(q->vlan); + vlan = rcu_dereference(q->vlan); if (vlan) vlan->dev->stats.tx_dropped++; rcu_read_unlock_bh(); @@ -633,7 +631,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); 
rcu_read_lock_bh(); - vlan = rcu_dereference_bh(q->vlan); + vlan = rcu_dereference(q->vlan); if (vlan) macvlan_count_rx(vlan, len, ret == 0, 0); rcu_read_unlock_bh(); @@ -729,7 +727,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNGETIFF: rcu_read_lock_bh(); - vlan = rcu_dereference_bh(q->vlan); + vlan = rcu_dereference(q->vlan); if (vlan) dev_hold(vlan->dev); rcu_read_unlock_bh(); @@ -738,7 +736,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, return -ENOLINK; ret = 0; - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || + if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) || put_user(q->flags, &ifr->ifr_flags)) ret = -EFAULT; dev_put(vlan->dev); diff --git a/trunk/drivers/net/myri10ge/myri10ge.c b/trunk/drivers/net/myri10ge/myri10ge.c index a7f2eed9a08a..ea5cfe2c3a04 100644 --- a/trunk/drivers/net/myri10ge/myri10ge.c +++ b/trunk/drivers/net/myri10ge/myri10ge.c @@ -253,7 +253,7 @@ struct myri10ge_priv { unsigned long serial_number; int vendor_specific_offset; int fw_multicast_support; - u32 features; + unsigned long features; u32 max_tso6; u32 read_dma; u32 write_dma; @@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) { struct myri10ge_priv *mgp = netdev_priv(netdev); - u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); + unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); if (tso_enabled) netdev->features |= flags; diff --git a/trunk/drivers/net/pch_gbe/pch_gbe_main.c b/trunk/drivers/net/pch_gbe/pch_gbe_main.c index 1bf12339441b..d7355306a738 100644 --- a/trunk/drivers/net/pch_gbe/pch_gbe_main.c +++ b/trunk/drivers/net/pch_gbe/pch_gbe_main.c @@ -2247,7 +2247,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct pch_gbe_adapter *adapter = netdev_priv(netdev); - cancel_work_sync(&adapter->reset_task); + flush_scheduled_work(); unregister_netdev(netdev); pch_gbe_hal_phy_hw_reset(&adapter->hw); diff --git a/trunk/drivers/net/ppp_generic.c b/trunk/drivers/net/ppp_generic.c index 9f6d670748d1..c7a6c4466978 100644 --- a/trunk/drivers/net/ppp_generic.c +++ b/trunk/drivers/net/ppp_generic.c @@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ppp_release(NULL, file); err = 0; } else - pr_warn("PPPIOCDETACH file->f_count=%ld\n", - atomic_long_read(&file->f_count)); + printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", + atomic_long_read(&file->f_count)); mutex_unlock(&ppp_mutex); return err; } @@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (pf->kind != INTERFACE) { /* can't happen */ - pr_err("PPP: not interface or channel??\n"); + printk(KERN_ERR "PPP: not interface or channel??\n"); return -EINVAL; } @@ -704,8 +704,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } vj = slhc_init(val2+1, val+1); if (!vj) { - netdev_err(ppp->dev, - "PPP: no memory (VJ compressor)\n"); + printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); err = -ENOMEM; break; } @@ -899,17 +898,17 @@ static int __init ppp_init(void) { int err; - pr_info("PPP generic driver version " PPP_VERSION "\n"); + printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); err = register_pernet_device(&ppp_net_ops); if (err) { - pr_err("failed to register PPP pernet device (%d)\n", err); + printk(KERN_ERR "failed to register PPP pernet 
device (%d)\n", err); goto out; } err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); if (err) { - pr_err("failed to register PPP device (%d)\n", err); + printk(KERN_ERR "failed to register PPP device (%d)\n", err); goto out_net; } @@ -1079,7 +1078,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); if (!new_skb) { if (net_ratelimit()) - netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n"); + printk(KERN_ERR "PPP: no memory (comp pkt)\n"); return NULL; } if (ppp->dev->hard_header_len > PPP_HDRLEN) @@ -1109,7 +1108,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) * the same number. */ if (net_ratelimit()) - netdev_err(ppp->dev, "ppp: compressor dropped pkt\n"); + printk(KERN_ERR "ppp: compressor dropped pkt\n"); kfree_skb(skb); kfree_skb(new_skb); new_skb = NULL; @@ -1139,9 +1138,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) if (ppp->pass_filter && sk_run_filter(skb, ppp->pass_filter) == 0) { if (ppp->debug & 1) - netdev_printk(KERN_DEBUG, ppp->dev, - "PPP: outbound frame " - "not passed\n"); + printk(KERN_DEBUG "PPP: outbound frame not passed\n"); kfree_skb(skb); return; } @@ -1167,7 +1164,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, GFP_ATOMIC); if (!new_skb) { - netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n"); + printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n"); goto drop; } skb_reserve(new_skb, ppp->dev->hard_header_len - 2); @@ -1205,9 +1202,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) proto != PPP_LCP && proto != PPP_CCP) { if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { if (net_ratelimit()) - netdev_err(ppp->dev, - "ppp: compression required but " - "down - pkt dropped.\n"); + printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n"); goto drop; } skb = pad_compress_skb(ppp, skb); @@ -1510,7 +1505,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) noskb: spin_unlock_bh(&pch->downl); if (ppp->debug & 1) - netdev_err(ppp->dev, "PPP: no memory (fragment)\n"); + printk(KERN_ERR "PPP: no memory (fragment)\n"); ++ppp->dev->stats.tx_errors; ++ppp->nxseq; return 1; /* abandon the frame */ @@ -1691,8 +1686,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) /* copy to a new sk_buff with more tailroom */ ns = dev_alloc_skb(skb->len + 128); if (!ns) { - netdev_err(ppp->dev, "PPP: no memory " - "(VJ decomp)\n"); + printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); goto err; } skb_reserve(ns, 2); @@ -1705,8 +1699,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); if (len <= 0) { - netdev_printk(KERN_DEBUG, ppp->dev, - "PPP: VJ decompression error\n"); + printk(KERN_DEBUG "PPP: VJ decompression error\n"); goto err; } len += 2; @@ -1728,7 +1721,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) goto err; if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { - netdev_err(ppp->dev, "PPP: VJ uncompressed error\n"); + printk(KERN_ERR "PPP: VJ uncompressed error\n"); goto err; } proto = PPP_IP; @@ -1769,9 +1762,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) if (ppp->pass_filter && sk_run_filter(skb, ppp->pass_filter) == 0) { if (ppp->debug & 1) - netdev_printk(KERN_DEBUG, ppp->dev, - "PPP: inbound frame " - "not passed\n"); + printk(KERN_DEBUG "PPP: inbound frame " + "not passed\n"); kfree_skb(skb); return; } @@ -1829,8 +1821,7 @@ 
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) ns = dev_alloc_skb(obuff_size); if (!ns) { - netdev_err(ppp->dev, "ppp_decompress_frame: " - "no memory\n"); + printk(KERN_ERR "ppp_decompress_frame: no memory\n"); goto err; } /* the decompressor still expects the A/C bytes in the hdr */ @@ -1998,7 +1989,7 @@ ppp_mp_reconstruct(struct ppp *ppp) u32 seq = ppp->nextseq; u32 minseq = ppp->minseq; struct sk_buff_head *list = &ppp->mrq; - struct sk_buff *p, *tmp; + struct sk_buff *p, *next; struct sk_buff *head, *tail; struct sk_buff *skb = NULL; int lost = 0, len = 0; @@ -2007,15 +1998,13 @@ ppp_mp_reconstruct(struct ppp *ppp) return NULL; head = list->next; tail = NULL; - skb_queue_walk_safe(list, p, tmp) { - again: + for (p = head; p != (struct sk_buff *) list; p = next) { + next = p->next; if (seq_before(PPP_MP_CB(p)->sequence, seq)) { /* this can't happen, anyway ignore the skb */ - netdev_err(ppp->dev, "ppp_mp_reconstruct bad " - "seq %u < %u\n", - PPP_MP_CB(p)->sequence, seq); - __skb_unlink(p, list); - kfree_skb(p); + printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", + PPP_MP_CB(p)->sequence, seq); + head = next; continue; } if (PPP_MP_CB(p)->sequence != seq) { @@ -2027,7 +2016,8 @@ ppp_mp_reconstruct(struct ppp *ppp) lost = 1; seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? minseq + 1: PPP_MP_CB(p)->sequence; - goto again; + next = p; + continue; } /* @@ -2052,9 +2042,17 @@ ppp_mp_reconstruct(struct ppp *ppp) (PPP_MP_CB(head)->BEbits & B)) { if (len > ppp->mrru + 2) { ++ppp->dev->stats.rx_length_errors; - netdev_printk(KERN_DEBUG, ppp->dev, - "PPP: reconstructed packet" - " is too long (%d)\n", len); + printk(KERN_DEBUG "PPP: reconstructed packet" + " is too long (%d)\n", len); + } else if (p == head) { + /* fragment is complete packet - reuse skb */ + tail = p; + skb = skb_get(p); + break; + } else if ((skb = dev_alloc_skb(len)) == NULL) { + ++ppp->dev->stats.rx_missed_errors; + printk(KERN_DEBUG "PPP: no memory for " + "reconstructed packet"); } else { tail = p; break; @@ -2067,17 +2065,9 @@ ppp_mp_reconstruct(struct ppp *ppp) * and we haven't found a complete valid packet yet, * we can discard up to and including this fragment. */ - if (PPP_MP_CB(p)->BEbits & E) { - struct sk_buff *tmp2; + if (PPP_MP_CB(p)->BEbits & E) + head = next; - skb_queue_reverse_walk_from_safe(list, p, tmp2) { - __skb_unlink(p, list); - kfree_skb(p); - } - head = skb_peek(list); - if (!head) - break; - } ++seq; } @@ -2087,37 +2077,26 @@ ppp_mp_reconstruct(struct ppp *ppp) signal a receive error. 
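/* Illustrative sketch, not taken from the patch: the ppp_mp_reconstruct
 * hunks in this area go back to linearising a reassembled multilink packet
 * by copying every queued fragment into one freshly allocated skb, rather
 * than chaining the fragments on frag_list.  The helper name ex_coalesce
 * and the assumption that the caller already knows the total length are
 * inventions of this example.
 */
#include <linux/skbuff.h>

static struct sk_buff *ex_coalesce(struct sk_buff *head, struct sk_buff *tail,
				   int total_len)
{
	struct sk_buff *skb, *p;

	skb = dev_alloc_skb(total_len);
	if (!skb)
		return NULL;	/* caller would account this as a dropped packet */

	/* append the payload of each fragment from head through tail */
	for (p = head; ; p = p->next) {
		skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
		if (p == tail)
			break;
	}
	return skb;
}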
*/ if (PPP_MP_CB(head)->sequence != ppp->nextseq) { if (ppp->debug & 1) - netdev_printk(KERN_DEBUG, ppp->dev, - " missed pkts %u..%u\n", - ppp->nextseq, - PPP_MP_CB(head)->sequence-1); + printk(KERN_DEBUG " missed pkts %u..%u\n", + ppp->nextseq, + PPP_MP_CB(head)->sequence-1); ++ppp->dev->stats.rx_dropped; ppp_receive_error(ppp); } - skb = head; - if (head != tail) { - struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list; - p = skb_queue_next(list, head); - __skb_unlink(skb, list); - skb_queue_walk_from_safe(list, p, tmp) { - __skb_unlink(p, list); - *fragpp = p; - p->next = NULL; - fragpp = &p->next; - - skb->len += p->len; - skb->data_len += p->len; - skb->truesize += p->len; - - if (p == tail) - break; - } - } else { - __skb_unlink(skb, list); - } - + if (head != tail) + /* copy to a single skb */ + for (p = head; p != tail->next; p = p->next) + skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; + head = tail->next; + } + + /* Discard all the skbuffs that we have copied the data out of + or that we can't use. */ + while ((p = list->next) != head) { + __skb_unlink(p, list); + kfree_skb(p); } return skb; @@ -2638,8 +2617,8 @@ ppp_create_interface(struct net *net, int unit, int *retp) ret = register_netdev(dev); if (ret != 0) { unit_put(&pn->units_idr, unit); - netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", - dev->name, ret); + printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", + dev->name, ret); goto out2; } @@ -2711,9 +2690,9 @@ static void ppp_destroy_interface(struct ppp *ppp) if (!ppp->file.dead || ppp->n_channels) { /* "can't happen" */ - netdev_err(ppp->dev, "ppp: destroying ppp struct %p " - "but dead=%d n_channels=%d !\n", - ppp, ppp->file.dead, ppp->n_channels); + printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " + "n_channels=%d !\n", ppp, ppp->file.dead, + ppp->n_channels); return; } @@ -2855,7 +2834,8 @@ static void ppp_destroy_channel(struct channel *pch) if (!pch->file.dead) { /* "can't happen" */ - pr_err("ppp: destroying undead channel %p !\n", pch); + printk(KERN_ERR "ppp: destroying undead channel %p !\n", + pch); return; } skb_queue_purge(&pch->file.xq); @@ -2867,7 +2847,7 @@ static void __exit ppp_cleanup(void) { /* should never happen */ if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) - pr_err("PPP: removing module but units remain!\n"); + printk(KERN_ERR "PPP: removing module but units remain!\n"); unregister_chrdev(PPP_MAJOR, "ppp"); device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); class_destroy(ppp_class); @@ -2885,7 +2865,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n) again: if (!idr_pre_get(p, GFP_KERNEL)) { - pr_err("PPP: No free memory for idr\n"); + printk(KERN_ERR "PPP: No free memory for idr\n"); return -ENOMEM; } diff --git a/trunk/drivers/net/sfc/ethtool.c b/trunk/drivers/net/sfc/ethtool.c index 713969accdbd..0e8bb19ed60d 100644 --- a/trunk/drivers/net/sfc/ethtool.c +++ b/trunk/drivers/net/sfc/ethtool.c @@ -502,7 +502,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) { struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); - u32 features; + unsigned long features; features = NETIF_F_TSO; if (efx->type->offload_features & NETIF_F_V6_CSUM) @@ -519,7 +519,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) { struct efx_nic *efx = 
netdev_priv(net_dev); - u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM; + unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; if (enable) net_dev->features |= features; diff --git a/trunk/drivers/net/sfc/net_driver.h b/trunk/drivers/net/sfc/net_driver.h index c65270241d2d..28df8665256a 100644 --- a/trunk/drivers/net/sfc/net_driver.h +++ b/trunk/drivers/net/sfc/net_driver.h @@ -906,7 +906,7 @@ struct efx_nic_type { unsigned int phys_addr_channels; unsigned int tx_dc_base; unsigned int rx_dc_base; - u32 offload_features; + unsigned long offload_features; u32 reset_world_flags; }; diff --git a/trunk/drivers/net/smc91x.c b/trunk/drivers/net/smc91x.c index 43654a3bb0ec..726df611ee17 100644 --- a/trunk/drivers/net/smc91x.c +++ b/trunk/drivers/net/smc91x.c @@ -81,7 +81,6 @@ static const char version[] = #include #include #include -#include #include #include @@ -2395,15 +2394,6 @@ static int smc_drv_resume(struct device *dev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id smc91x_match[] = { - { .compatible = "smsc,lan91c94", }, - { .compatible = "smsc,lan91c111", }, - {}, -} -MODULE_DEVICE_TABLE(of, smc91x_match); -#endif - static struct dev_pm_ops smc_drv_pm_ops = { .suspend = smc_drv_suspend, .resume = smc_drv_resume, @@ -2416,9 +2406,6 @@ static struct platform_driver smc_driver = { .name = CARDNAME, .owner = THIS_MODULE, .pm = &smc_drv_pm_ops, -#ifdef CONFIG_OF - .of_match_table = smc91x_match, -#endif }, }; diff --git a/trunk/drivers/net/sungem.c b/trunk/drivers/net/sungem.c index c1a344829b54..1c5408f83937 100644 --- a/trunk/drivers/net/sungem.c +++ b/trunk/drivers/net/sungem.c @@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s if (txmac_stat & MAC_TXSTAT_URUN) { netdev_err(dev, "TX MAC xmit underrun\n"); - dev->stats.tx_fifo_errors++; + gp->net_stats.tx_fifo_errors++; } if (txmac_stat & MAC_TXSTAT_MPE) { netdev_err(dev, "TX MAC max packet size error\n"); - dev->stats.tx_errors++; + gp->net_stats.tx_errors++; } /* The rest are all cases of one of the 16-bit TX * counters expiring. */ if (txmac_stat & MAC_TXSTAT_NCE) - dev->stats.collisions += 0x10000; + gp->net_stats.collisions += 0x10000; if (txmac_stat & MAC_TXSTAT_ECE) { - dev->stats.tx_aborted_errors += 0x10000; - dev->stats.collisions += 0x10000; + gp->net_stats.tx_aborted_errors += 0x10000; + gp->net_stats.collisions += 0x10000; } if (txmac_stat & MAC_TXSTAT_LCE) { - dev->stats.tx_aborted_errors += 0x10000; - dev->stats.collisions += 0x10000; + gp->net_stats.tx_aborted_errors += 0x10000; + gp->net_stats.collisions += 0x10000; } /* We do not keep track of MAC_TXSTAT_FCE and @@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s u32 smac = readl(gp->regs + MAC_SMACHINE); netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); - dev->stats.rx_over_errors++; - dev->stats.rx_fifo_errors++; + gp->net_stats.rx_over_errors++; + gp->net_stats.rx_fifo_errors++; ret = gem_rxmac_reset(gp); } if (rxmac_stat & MAC_RXSTAT_ACE) - dev->stats.rx_frame_errors += 0x10000; + gp->net_stats.rx_frame_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_CCE) - dev->stats.rx_crc_errors += 0x10000; + gp->net_stats.rx_crc_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_LCE) - dev->stats.rx_length_errors += 0x10000; + gp->net_stats.rx_length_errors += 0x10000; /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE * events. 
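/* Illustrative sketch, not taken from the patch: the sungem hunks above
 * revert to accumulating counters in a driver-private struct
 * net_device_stats and returning that block from the .ndo_get_stats hook,
 * instead of updating dev->stats in place.  ex_priv and ex_get_stats are
 * invented names for this example.
 */
#include <linux/netdevice.h>

struct ex_priv {
	struct net_device_stats net_stats;	/* driver-owned counters */
};

static struct net_device_stats *ex_get_stats(struct net_device *dev)
{
	struct ex_priv *p = netdev_priv(dev);

	/* hot paths (rx/tx handlers, error interrupts) update p->net_stats;
	 * the stack reads the snapshot through this hook */
	return &p->net_stats;
}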
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: no buffer for rx frame\n", gp->dev->name); - dev->stats.rx_dropped++; + gp->net_stats.rx_dropped++; } if (gem_status & GREG_STAT_RXTAGERR) { @@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: corrupt rx tag framing\n", gp->dev->name); - dev->stats.rx_errors++; + gp->net_stats.rx_errors++; goto do_reset; } @@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st break; } gp->tx_skbs[entry] = NULL; - dev->stats.tx_bytes += skb->len; + gp->net_stats.tx_bytes += skb->len; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { txd = &gp->init_block->txd[entry]; @@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st entry = NEXT_TX(entry); } - dev->stats.tx_packets++; + gp->net_stats.tx_packets++; dev_kfree_skb_irq(skb); } gp->tx_old = entry; @@ -738,7 +738,6 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit) static int gem_rx(struct gem *gp, int work_to_do) { - struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; __sum16 csum; @@ -783,15 +782,15 @@ static int gem_rx(struct gem *gp, int work_to_do) len = (status & RXDCTRL_BUFSZ) >> 16; if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { - dev->stats.rx_errors++; + gp->net_stats.rx_errors++; if (len < ETH_ZLEN) - dev->stats.rx_length_errors++; + gp->net_stats.rx_length_errors++; if (len & RXDCTRL_BAD) - dev->stats.rx_crc_errors++; + gp->net_stats.rx_crc_errors++; /* We'll just return it to GEM. */ drop_it: - dev->stats.rx_dropped++; + gp->net_stats.rx_dropped++; goto next; } @@ -844,8 +843,8 @@ static int gem_rx(struct gem *gp, int work_to_do) netif_receive_skb(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + gp->net_stats.rx_packets++; + gp->net_stats.rx_bytes += len; next: entry = NEXT_RX(entry); @@ -2473,6 +2472,7 @@ static int gem_resume(struct pci_dev *pdev) static struct net_device_stats *gem_get_stats(struct net_device *dev) { struct gem *gp = netdev_priv(dev); + struct net_device_stats *stats = &gp->net_stats; spin_lock_irq(&gp->lock); spin_lock(&gp->tx_lock); @@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) * so we shield against this */ if (gp->running) { - dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); + stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_FCSERR); - dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); + stats->rx_frame_errors += readl(gp->regs + MAC_AERR); writel(0, gp->regs + MAC_AERR); - dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); + stats->rx_length_errors += readl(gp->regs + MAC_LERR); writel(0, gp->regs + MAC_LERR); - dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); - dev->stats.collisions += + stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); + stats->collisions += (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); writel(0, gp->regs + MAC_ECOLL); @@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); - return &dev->stats; + return &gp->net_stats; } static int gem_set_mac_address(struct net_device *dev, void *addr) diff --git a/trunk/drivers/net/sungem.h b/trunk/drivers/net/sungem.h index 
ede017872367..19905460def6 100644 --- a/trunk/drivers/net/sungem.h +++ b/trunk/drivers/net/sungem.h @@ -994,6 +994,7 @@ struct gem { u32 status; struct napi_struct napi; + struct net_device_stats net_stats; int tx_fifo_sz; int rx_fifo_sz; diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index cc069528b322..7841a8f69998 100644 --- a/trunk/drivers/net/tg3.c +++ b/trunk/drivers/net/tg3.c @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2011 Broadcom Corporation. + * Copyright (C) 2005-2010 Broadcom Corporation. * * Firmware is: * Derived from proprietary unpublished source code, @@ -60,14 +60,20 @@ #define BAR_0 0 #define BAR_2 2 +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define TG3_VLAN_TAG_USED 1 +#else +#define TG3_VLAN_TAG_USED 0 +#endif + #include "tg3.h" #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 117 +#define TG3_MIN_NUM 116 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "January 25, 2011" +#define DRV_MODULE_RELDATE "December 3, 2010" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -128,6 +134,9 @@ TG3_TX_RING_SIZE) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) +#define TG3_RX_DMA_ALIGN 16 +#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN) + #define TG3_DMA_BYTE_ENAB 64 #define TG3_RX_STD_DMA_SZ 1536 @@ -1776,29 +1785,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val); - switch (val) { - case TG3_CL45_D7_EEERES_STAT_LP_1000T: - switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { - case ASIC_REV_5717: - case ASIC_REV_5719: - case ASIC_REV_57765: - /* Enable SM_DSP clock and tx 6dB coding. */ - val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); - - tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); - - /* Turn off SM_DSP clock. 
*/ - val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | - MII_TG3_AUXCTL_ACTL_TX_6DB; - tg3_writephy(tp, MII_TG3_AUX_CTRL, val); - } - /* Fallthrough */ - case TG3_CL45_D7_EEERES_STAT_LP_100TX: + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) tp->setlpicnt = 2; - } } if (!tp->setlpicnt) { @@ -2988,19 +2977,11 @@ static void tg3_phy_copper_begin(struct tg3 *tp) MII_TG3_AUXCTL_ACTL_TX_6DB; tg3_writephy(tp, MII_TG3_AUX_CTRL, val); - switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { - case ASIC_REV_5717: - case ASIC_REV_57765: - if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) - tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | - MII_TG3_DSP_CH34TP2_HIBW01); - /* Fall through */ - case ASIC_REV_5719: - val = MII_TG3_DSP_TAP26_ALNOKO | - MII_TG3_DSP_TAP26_RMRXSTO | - MII_TG3_DSP_TAP26_OPCSINPT; - tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); - } + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, + val | MII_TG3_DSP_CH34TP2_HIBW01); val = 0; if (tp->link_config.autoneg == AUTONEG_ENABLE) { @@ -4741,6 +4722,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) struct sk_buff *skb; dma_addr_t dma_addr; u32 opaque_key, desc_idx, *post_ptr; + bool hw_vlan __maybe_unused = false; + u16 vtag __maybe_unused = 0; desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; @@ -4799,12 +4782,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); - copy_skb = netdev_alloc_skb(tp->dev, len + + copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + TG3_RAW_IP_ALIGN); if (copy_skb == NULL) goto drop_it_no_recycle; - skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); @@ -4831,11 +4814,30 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) } if (desc->type_flags & RXD_FLAG_VLAN && - !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, - desc->err_vlan & RXD_VLAN_MASK); + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { + vtag = desc->err_vlan & RXD_VLAN_MASK; +#if TG3_VLAN_TAG_USED + if (tp->vlgrp) + hw_vlan = true; + else +#endif + { + struct vlan_ethhdr *ve = (struct vlan_ethhdr *) + __skb_push(skb, VLAN_HLEN); + + memmove(ve, skb->data + VLAN_HLEN, + ETH_ALEN * 2); + ve->h_vlan_proto = htons(ETH_P_8021Q); + ve->h_vlan_TCI = htons(vtag); + } + } - napi_gro_receive(&tnapi->napi, skb); +#if TG3_VLAN_TAG_USED + if (hw_vlan) + vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb); + else +#endif + napi_gro_receive(&tnapi->napi, skb); received++; budget--; @@ -5738,9 +5740,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, base_flags |= TXD_FLAG_TCPUDP_CSUM; } +#if TG3_VLAN_TAG_USED if (vlan_tx_tag_present(skb)) base_flags |= (TXD_FLAG_VLAN | (vlan_tx_tag_get(skb) << 16)); +#endif len = skb_headlen(skb); @@ -5982,10 +5986,11 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, } } } - +#if TG3_VLAN_TAG_USED if (vlan_tx_tag_present(skb)) base_flags |= (TXD_FLAG_VLAN | (vlan_tx_tag_get(skb) << 16)); +#endif if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && !mss && skb->len > VLAN_ETH_FRAME_LEN) @@ -7829,7 +7834,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) 
TG3_CPMU_DBTMR1_LNKIDLE_2047US); tw32_f(TG3_CPMU_EEE_DBTMR2, - TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR1_APE_TX_2047US | TG3_CPMU_DBTMR2_TXIDXEQ_2047US); } @@ -8103,9 +8108,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Program the jumbo buffer descriptor ring control * blocks on those devices that have them. */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || - ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) { + if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { /* Setup replenish threshold. */ tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); @@ -8223,12 +8227,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { val = tr32(TG3_RDMA_RSRVCTRL_REG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { - val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | - TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | - TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); - val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | - TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | - TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; + val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK; + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B; } tw32(TG3_RDMA_RSRVCTRL_REG, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); @@ -8350,8 +8350,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) && - tp->irq_cnt > 1) { + if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { val = tr32(MSGINT_MODE); val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; tw32(MSGINT_MODE, val); @@ -9091,8 +9090,7 @@ static void tg3_ints_init(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { u32 msi_mode = tr32(MSGINT_MODE); - if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) && - tp->irq_cnt > 1) + if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) msi_mode |= MSGINT_MODE_MULTIVEC_EN; tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); } @@ -9534,10 +9532,17 @@ static void __tg3_set_rx_mode(struct net_device *dev) rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | RX_MODE_KEEP_VLAN_TAG); -#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG * flag clear. */ +#if TG3_VLAN_TAG_USED + if (!tp->vlgrp && + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; +#else + /* By definition, VLAN is disabled always in this + * case. + */ if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) rx_mode |= RX_MODE_KEEP_VLAN_TAG; #endif @@ -10868,16 +10873,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) if (loopback_mode == TG3_MAC_LOOPBACK) { /* HW errata - mac loopback fails in some cases on 5780. * Normal traffic and PHY loopback are not affected by - * errata. Also, the MAC loopback test is deprecated for - * all newer ASIC revisions. + * errata. 
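/* Illustrative sketch, not taken from the patch: the tg3 hunks in this area
 * restore the compile-time VLAN acceleration pattern of this kernel
 * generation: a TG3_VLAN_TAG_USED-style switch keyed off CONFIG_VLAN_8021Q,
 * with tagged frames handed to vlan_gro_receive() only when a vlan_group
 * has been registered.  EX_VLAN_TAG_USED and ex_rx_deliver are invented
 * names for this example.
 */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define EX_VLAN_TAG_USED 1
#else
#define EX_VLAN_TAG_USED 0
#endif

static void ex_rx_deliver(struct napi_struct *napi, struct vlan_group *grp,
			  bool tagged, u16 vtag, struct sk_buff *skb)
{
#if EX_VLAN_TAG_USED
	if (tagged && grp) {
		/* accelerated path: the 8021q layer demuxes on the tag */
		vlan_gro_receive(napi, grp, vtag, skb);
		return;
	}
#endif
	/* untagged frame, or no vlan_group registered: plain GRO receive */
	napi_gro_receive(napi, skb);
}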
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || - (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) return 0; - mac_mode = tp->mac_mode & - ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); - mac_mode |= MAC_MODE_PORT_INT_LPBACK; + mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | + MAC_MODE_PORT_INT_LPBACK; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) mac_mode |= MAC_MODE_LINK_POLARITY; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) @@ -10899,8 +10901,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) tg3_writephy(tp, MII_BMCR, val); udelay(40); - mac_mode = tp->mac_mode & - ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; if (tp->phy_flags & TG3_PHYFLG_IS_FET) { tg3_writephy(tp, MII_TG3_FET_PTEST, MII_TG3_FET_PTEST_FRC_TX_LINK | @@ -10928,13 +10929,6 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) MII_TG3_EXT_CTRL_LNK3_LED_MODE); } tw32(MAC_MODE, mac_mode); - - /* Wait for link */ - for (i = 0; i < 100; i++) { - if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) - break; - mdelay(1); - } } else { return -EINVAL; } @@ -11041,19 +11035,14 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) static int tg3_test_loopback(struct tg3 *tp) { int err = 0; - u32 eee_cap, cpmuctrl = 0; + u32 cpmuctrl = 0; if (!netif_running(tp->dev)) return TG3_LOOPBACK_FAILED; - eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; - tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; - err = tg3_reset_hw(tp, 1); - if (err) { - err = TG3_LOOPBACK_FAILED; - goto done; - } + if (err) + return TG3_LOOPBACK_FAILED; /* Turn off gphy autopowerdown. */ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) @@ -11073,10 +11062,8 @@ static int tg3_test_loopback(struct tg3 *tp) udelay(10); } - if (status != CPMU_MUTEX_GNT_DRIVER) { - err = TG3_LOOPBACK_FAILED; - goto done; - } + if (status != CPMU_MUTEX_GNT_DRIVER) + return TG3_LOOPBACK_FAILED; /* Turn off link-based power management. */ cpmuctrl = tr32(TG3_CPMU_CTRL); @@ -11105,9 +11092,6 @@ static int tg3_test_loopback(struct tg3 *tp) if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, true); -done: - tp->phy_flags |= eee_cap; - return err; } @@ -11214,9 +11198,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || - ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !netif_running(dev))) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11232,9 +11214,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || - ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !netif_running(dev))) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11250,6 +11230,31 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } +#if TG3_VLAN_TAG_USED +static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(dev)) { + tp->vlgrp = grp; + return; + } + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 0); + + tp->vlgrp = grp; + + /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. 
*/ + __tg3_set_rx_mode(dev); + + tg3_netif_start(tp); + + tg3_full_unlock(tp); +} +#endif + static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct tg3 *tp = netdev_priv(dev); @@ -13061,7 +13066,9 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); static void inline vlan_features_add(struct net_device *dev, unsigned long flags) { +#if TG3_VLAN_TAG_USED dev->vlan_features |= flags; +#endif } static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) @@ -13318,9 +13325,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } /* Determine TSO capabilities */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) - ; /* Do nothing. HW bug. */ - else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) @@ -13371,8 +13376,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; } - if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && - tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || @@ -13390,8 +13394,42 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; tp->pcie_readrq = 4096; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - tp->pcie_readrq = 2048; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + u16 word; + + pci_read_config_word(tp->pdev, + tp->pcie_cap + PCI_EXP_LNKSTA, + &word); + switch (word & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + word &= PCI_EXP_LNKSTA_NLW; + word >>= PCI_EXP_LNKSTA_NLW_SHIFT; + switch (word) { + case 2: + tp->pcie_readrq = 2048; + break; + case 4: + tp->pcie_readrq = 1024; + break; + } + break; + + case PCI_EXP_LNKSTA_CLS_5_0GB: + word &= PCI_EXP_LNKSTA_NLW; + word >>= PCI_EXP_LNKSTA_NLW_SHIFT; + switch (word) { + case 1: + tp->pcie_readrq = 2048; + break; + case 2: + tp->pcie_readrq = 1024; + break; + case 4: + tp->pcie_readrq = 512; + break; + } + } + } pcie_set_readrq(tp->pdev, tp->pcie_readrq); @@ -13823,11 +13861,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; - tp->rx_offset = NET_IP_ALIGN; + tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { - tp->rx_offset = 0; + tp->rx_offset -= NET_IP_ALIGN; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS tp->rx_copy_thresh = ~(u16)0; #endif @@ -14591,6 +14629,9 @@ static const struct net_device_ops tg3_netdev_ops = { .ndo_do_ioctl = tg3_ioctl, .ndo_tx_timeout = tg3_tx_timeout, .ndo_change_mtu = tg3_change_mtu, +#if TG3_VLAN_TAG_USED + .ndo_vlan_rx_register = tg3_vlan_rx_register, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tg3_poll_controller, #endif @@ -14607,6 +14648,9 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = { .ndo_do_ioctl = tg3_ioctl, .ndo_tx_timeout = tg3_tx_timeout, .ndo_change_mtu = tg3_change_mtu, +#if TG3_VLAN_TAG_USED + .ndo_vlan_rx_register = tg3_vlan_rx_register, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tg3_poll_controller, #endif @@ -14656,7 +14700,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); +#if TG3_VLAN_TAG_USED dev->features 
|= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; +#endif tp = netdev_priv(dev); tp->pdev = pdev; diff --git a/trunk/drivers/net/tg3.h b/trunk/drivers/net/tg3.h index 73884b69b749..d62c8d937c82 100644 --- a/trunk/drivers/net/tg3.h +++ b/trunk/drivers/net/tg3.h @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2011 Broadcom Corporation. + * Copyright (C) 2007-2010 Broadcom Corporation. */ #ifndef _T3_H @@ -141,7 +141,6 @@ #define CHIPREV_ID_57780_A1 0x57780001 #define CHIPREV_ID_5717_A0 0x05717000 #define CHIPREV_ID_57765_A0 0x57785000 -#define CHIPREV_ID_5719_A0 0x05719000 #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 @@ -1106,7 +1105,7 @@ #define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 #define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff #define TG3_CPMU_EEE_DBTMR2 0x000036b8 -#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 +#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000 #define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff #define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc #define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 @@ -1334,10 +1333,6 @@ #define TG3_RDMA_RSRVCTRL_REG 0x00004900 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 -#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00 -#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0 -#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000 -#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000 #define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 #define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 /* 0x4904 --> 0x4910 unused */ @@ -2113,10 +2108,6 @@ #define MII_TG3_DSP_TAP1 0x0001 #define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 -#define MII_TG3_DSP_TAP26 0x001a -#define MII_TG3_DSP_TAP26_ALNOKO 0x0001 -#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002 -#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004 #define MII_TG3_DSP_AADJ1CH0 0x001f #define MII_TG3_DSP_CH34TP2 0x4022 #define MII_TG3_DSP_CH34TP2_HIBW01 0x0010 @@ -2817,6 +2808,9 @@ struct tg3 { u32 rx_std_max_post; u32 rx_offset; u32 rx_pkt_map_sz; +#if TG3_VLAN_TAG_USED + struct vlan_group *vlgrp; +#endif /* begin "everything else" cacheline(s) section */ diff --git a/trunk/drivers/net/tlan.c b/trunk/drivers/net/tlan.c index 0678e7e71f19..f8e463cd8ecc 100644 --- a/trunk/drivers/net/tlan.c +++ b/trunk/drivers/net/tlan.c @@ -63,45 +63,45 @@ * - Other minor stuff * * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's - * network cleanup in 2.3.43pre7 (Tigran & myself) - * - Minor stuff. + * network cleanup in 2.3.43pre7 (Tigran & myself) + * - Minor stuff. * - * v1.5 March 22, 2000 - Fixed another timer bug that would hang the - * driver if no cable/link were present. + * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver + * if no cable/link were present. * - Cosmetic changes. * - TODO: Port completely to new PCI/DMA API - * Auto-Neg fallback. - * - * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. - * Haven't tested it though, as the kernel support - * is currently broken (2.3.99p4p3). - * - Updated tlan.txt accordingly. - * - Adjusted minimum/maximum frame length. - * - There is now a TLAN website up at - * http://hp.sourceforge.net/ - * - * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now - * reports PHY information when used with Donald - * Beckers userspace MII diagnostics utility. 
- * - * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings. - * - Added link information to Auto-Neg and forced - * modes. When NIC operates with auto-neg the driver - * will report Link speed & duplex modes as well as - * link partner abilities. When forced link is used, - * the driver will report status of the established - * link. - * Please read tlan.txt for additional information. - * - Removed call to check_region(), and used - * return value of request_region() instead. + * Auto-Neg fallback. + * + * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't + * tested it though, as the kernel support is currently + * broken (2.3.99p4p3). + * - Updated tlan.txt accordingly. + * - Adjusted minimum/maximum frame length. + * - There is now a TLAN website up at + * http://hp.sourceforge.net/ + * + * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now + * reports PHY information when used with Donald + * Beckers userspace MII diagnostics utility. + * + * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings. + * - Added link information to Auto-Neg and forced + * modes. When NIC operates with auto-neg the driver + * will report Link speed & duplex modes as well as + * link partner abilities. When forced link is used, + * the driver will report status of the established + * link. + * Please read tlan.txt for additional information. + * - Removed call to check_region(), and used + * return value of request_region() instead. * * v1.8a May 28, 2000 - Minor updates. * * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues. - * - Updated with timer fixes from Andrew Morton. - * - Fixed module race in TLan_Open. - * - Added routine to monitor PHY status. - * - Added activity led support for Proliant devices. + * - Updated with timer fixes from Andrew Morton. + * - Fixed module race in TLan_Open. + * - Added routine to monitor PHY status. + * - Added activity led support for Proliant devices. * * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers * like the Compaq NetFlex3/E. @@ -111,8 +111,8 @@ * hardware probe is done with kernel API and * TLan_EisaProbe. * - Adjusted debug information for probing. - * - Fixed bug that would cause general debug - * information to be printed after driver removal. + * - Fixed bug that would cause general debug information + * to be printed after driver removal. * - Added transmit timeout handling. * - Fixed OOM return values in tlan_probe. * - Fixed possible mem leak in tlan_exit @@ -136,8 +136,8 @@ * * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.) * - * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues - * when link can't be established. + * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues + * when link can't be established. * - Added the bbuf option as a kernel parameter. * - Fixed ioaddr probe bug. * - Fixed stupid deadlock with MII interrupts. @@ -147,30 +147,28 @@ * TLAN v1.0 silicon. This needs to be investigated * further. * - * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per. - * interrupt. Thanks goes to - * Adam Keys - * Denis Beaudoin - * for providing the patch. - * - Fixed auto-neg output when using multiple - * adapters. - * - Converted to use new taskq interface. + * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per. + * interrupt. Thanks goes to + * Adam Keys + * Denis Beaudoin + * for providing the patch. + * - Fixed auto-neg output when using multiple + * adapters. 
+ * - Converted to use new taskq interface. * - * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.) + * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.) * * Samuel Chessman New Maintainer! * * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be - * 10T half duplex no loopback - * Thanks to Gunnar Eikman + * 10T half duplex no loopback + * Thanks to Gunnar Eikman * * Sakari Ailus : * * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway. - * v1.16 Jan 6 2011 - Make checkpatch.pl happy. - * v1.17 Jan 6 2011 - Add suspend/resume support. * - ******************************************************************************/ + *******************************************************************************/ #include #include @@ -187,11 +185,13 @@ #include "tlan.h" +typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 ); + /* For removing EISA devices */ -static struct net_device *tlan_eisa_devices; +static struct net_device *TLan_Eisa_Devices; -static int tlan_devices_installed; +static int TLanDevicesInstalled; /* Set speed, duplex and aui settings */ static int aui[MAX_TLAN_BOARDS]; @@ -202,8 +202,7 @@ module_param_array(aui, int, NULL, 0); module_param_array(duplex, int, NULL, 0); module_param_array(speed, int, NULL, 0); MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); -MODULE_PARM_DESC(duplex, - "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); +MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); MODULE_AUTHOR("Maintainer: Samuel Chessman "); @@ -219,144 +218,139 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); -static const char tlan_signature[] = "TLAN"; -static const char tlan_banner[] = "ThunderLAN driver v1.17\n"; +static const char TLanSignature[] = "TLAN"; +static const char tlan_banner[] = "ThunderLAN driver v1.15a\n"; static int tlan_have_pci; static int tlan_have_eisa; -static const char * const media[] = { - "10BaseT-HD", "10BaseT-FD", "100baseTx-HD", - "100BaseTx-FD", "100BaseT4", NULL +static const char *media[] = { + "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ", + "100baseTx-FD", "100baseT4", NULL }; static struct board { - const char *device_label; - u32 flags; - u16 addr_ofs; + const char *deviceLabel; + u32 flags; + u16 addrOfs; } board_info[] = { { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, - { "Compaq Netelligent 10/100 TX PCI UTP", - TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, + { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, { "Compaq NetFlex-3/P", TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, { "Compaq Netelligent Integrated 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, - { "Compaq Netelligent Dual 10/100 TX PCI UTP", - TLAN_ADAPTER_NONE, 0x83 }, - { "Compaq Netelligent 10/100 TX Embedded UTP", - TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, - { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 }, - { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 }, + { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 }, + { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, { "Compaq Netelligent 
10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, - { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, { "Compaq NetFlex-3/E", - TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ + TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, - { "Compaq NetFlex-3/E", - TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ + { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ }; static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, { 0,} }; MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); -static void tlan_eisa_probe(void); -static void tlan_eisa_cleanup(void); -static int tlan_init(struct net_device *); -static int tlan_open(struct net_device *dev); -static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *); -static irqreturn_t tlan_handle_interrupt(int, void *); -static int tlan_close(struct net_device *); -static struct net_device_stats *tlan_get_stats(struct net_device *); -static void tlan_set_multicast_list(struct net_device *); -static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static int tlan_probe1(struct pci_dev *pdev, long ioaddr, - int irq, int rev, const struct pci_device_id *ent); -static void tlan_tx_timeout(struct net_device *dev); -static void tlan_tx_timeout_work(struct work_struct *work); -static int tlan_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent); - -static u32 tlan_handle_tx_eof(struct net_device *, u16); -static u32 tlan_handle_stat_overflow(struct net_device *, u16); -static u32 tlan_handle_rx_eof(struct net_device *, u16); -static u32 tlan_handle_dummy(struct net_device *, u16); -static u32 tlan_handle_tx_eoc(struct net_device *, u16); -static u32 tlan_handle_status_check(struct net_device *, u16); -static 
u32 tlan_handle_rx_eoc(struct net_device *, u16); - -static void tlan_timer(unsigned long); - -static void tlan_reset_lists(struct net_device *); -static void tlan_free_lists(struct net_device *); -static void tlan_print_dio(u16); -static void tlan_print_list(struct tlan_list *, char *, int); -static void tlan_read_and_clear_stats(struct net_device *, int); -static void tlan_reset_adapter(struct net_device *); -static void tlan_finish_reset(struct net_device *); -static void tlan_set_mac(struct net_device *, int areg, char *mac); - -static void tlan_phy_print(struct net_device *); -static void tlan_phy_detect(struct net_device *); -static void tlan_phy_power_down(struct net_device *); -static void tlan_phy_power_up(struct net_device *); -static void tlan_phy_reset(struct net_device *); -static void tlan_phy_start_link(struct net_device *); -static void tlan_phy_finish_auto_neg(struct net_device *); +static void TLan_EisaProbe( void ); +static void TLan_Eisa_Cleanup( void ); +static int TLan_Init( struct net_device * ); +static int TLan_Open( struct net_device *dev ); +static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *); +static irqreturn_t TLan_HandleInterrupt( int, void *); +static int TLan_Close( struct net_device *); +static struct net_device_stats *TLan_GetStats( struct net_device *); +static void TLan_SetMulticastList( struct net_device *); +static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); +static int TLan_probe1( struct pci_dev *pdev, long ioaddr, + int irq, int rev, const struct pci_device_id *ent); +static void TLan_tx_timeout( struct net_device *dev); +static void TLan_tx_timeout_work(struct work_struct *work); +static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); + +static u32 TLan_HandleTxEOF( struct net_device *, u16 ); +static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); +static u32 TLan_HandleRxEOF( struct net_device *, u16 ); +static u32 TLan_HandleDummy( struct net_device *, u16 ); +static u32 TLan_HandleTxEOC( struct net_device *, u16 ); +static u32 TLan_HandleStatusCheck( struct net_device *, u16 ); +static u32 TLan_HandleRxEOC( struct net_device *, u16 ); + +static void TLan_Timer( unsigned long ); + +static void TLan_ResetLists( struct net_device * ); +static void TLan_FreeLists( struct net_device * ); +static void TLan_PrintDio( u16 ); +static void TLan_PrintList( TLanList *, char *, int ); +static void TLan_ReadAndClearStats( struct net_device *, int ); +static void TLan_ResetAdapter( struct net_device * ); +static void TLan_FinishReset( struct net_device * ); +static void TLan_SetMac( struct net_device *, int areg, char *mac ); + +static void TLan_PhyPrint( struct net_device * ); +static void TLan_PhyDetect( struct net_device * ); +static void TLan_PhyPowerDown( struct net_device * ); +static void TLan_PhyPowerUp( struct net_device * ); +static void TLan_PhyReset( struct net_device * ); +static void TLan_PhyStartLink( struct net_device * ); +static void TLan_PhyFinishAutoNeg( struct net_device * ); #ifdef MONITOR -static void tlan_phy_monitor(struct net_device *); +static void TLan_PhyMonitor( struct net_device * ); #endif /* - static int tlan_phy_nop(struct net_device *); - static int tlan_phy_internal_check(struct net_device *); - static int tlan_phy_internal_service(struct net_device *); - static int tlan_phy_dp83840a_check(struct net_device *); +static int TLan_PhyNop( struct net_device * ); +static int TLan_PhyInternalCheck( struct net_device * ); +static int 
TLan_PhyInternalService( struct net_device * ); +static int TLan_PhyDp83840aCheck( struct net_device * ); */ -static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *); -static void tlan_mii_send_data(u16, u32, unsigned); -static void tlan_mii_sync(u16); -static void tlan_mii_write_reg(struct net_device *, u16, u16, u16); +static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); +static void TLan_MiiSendData( u16, u32, unsigned ); +static void TLan_MiiSync( u16 ); +static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); -static void tlan_ee_send_start(u16); -static int tlan_ee_send_byte(u16, u8, int); -static void tlan_ee_receive_byte(u16, u8 *, int); -static int tlan_ee_read_byte(struct net_device *, u8, u8 *); +static void TLan_EeSendStart( u16 ); +static int TLan_EeSendByte( u16, u8, int ); +static void TLan_EeReceiveByte( u16, u8 *, int ); +static int TLan_EeReadByte( struct net_device *, u8, u8 * ); static inline void -tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb) +TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) { unsigned long addr = (unsigned long)skb; tag->buffer[9].address = addr; @@ -364,7 +358,7 @@ tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb) } static inline struct sk_buff * -tlan_get_skb(const struct tlan_list *tag) +TLan_GetSKB( const struct tlan_list_tag *tag) { unsigned long addr; @@ -373,50 +367,50 @@ tlan_get_skb(const struct tlan_list *tag) return (struct sk_buff *) addr; } -static u32 -(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = { + +static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { NULL, - tlan_handle_tx_eof, - tlan_handle_stat_overflow, - tlan_handle_rx_eof, - tlan_handle_dummy, - tlan_handle_tx_eoc, - tlan_handle_status_check, - tlan_handle_rx_eoc + TLan_HandleTxEOF, + TLan_HandleStatOverflow, + TLan_HandleRxEOF, + TLan_HandleDummy, + TLan_HandleTxEOC, + TLan_HandleStatusCheck, + TLan_HandleRxEOC }; static inline void -tlan_set_timer(struct net_device *dev, u32 ticks, u32 type) +TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); unsigned long flags = 0; if (!in_irq()) spin_lock_irqsave(&priv->lock, flags); - if (priv->timer.function != NULL && - priv->timer_type != TLAN_TIMER_ACTIVITY) { + if ( priv->timer.function != NULL && + priv->timerType != TLAN_TIMER_ACTIVITY ) { if (!in_irq()) spin_unlock_irqrestore(&priv->lock, flags); return; } - priv->timer.function = tlan_timer; + priv->timer.function = TLan_Timer; if (!in_irq()) spin_unlock_irqrestore(&priv->lock, flags); priv->timer.data = (unsigned long) dev; - priv->timer_set_at = jiffies; - priv->timer_type = type; + priv->timerSetAt = jiffies; + priv->timerType = type; mod_timer(&priv->timer, jiffies + ticks); -} +} /* TLan_SetTimer */ /***************************************************************************** ****************************************************************************** -ThunderLAN driver primary functions + ThunderLAN Driver Primary Functions -these functions are more or less common to all linux network drivers. + These functions are more or less common to all Linux network drivers. ****************************************************************************** *****************************************************************************/ @@ -425,117 +419,49 @@ these functions are more or less common to all linux network drivers. 
-/*************************************************************** - * tlan_remove_one - * - * Returns: - * Nothing - * Parms: - * None - * - * Goes through the TLanDevices list and frees the device - * structs and memory associated with each device (lists - * and buffers). It also ureserves the IO port regions - * associated with this device. - * - **************************************************************/ + /*************************************************************** + * tlan_remove_one + * + * Returns: + * Nothing + * Parms: + * None + * + * Goes through the TLanDevices list and frees the device + * structs and memory associated with each device (lists + * and buffers). It also ureserves the IO port regions + * associated with this device. + * + **************************************************************/ -static void __devexit tlan_remove_one(struct pci_dev *pdev) +static void __devexit tlan_remove_one( struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct tlan_priv *priv = netdev_priv(dev); + struct net_device *dev = pci_get_drvdata( pdev ); + TLanPrivateInfo *priv = netdev_priv(dev); - unregister_netdev(dev); + unregister_netdev( dev ); - if (priv->dma_storage) { - pci_free_consistent(priv->pci_dev, - priv->dma_size, priv->dma_storage, - priv->dma_storage_dma); + if ( priv->dmaStorage ) { + pci_free_consistent(priv->pciDev, + priv->dmaSize, priv->dmaStorage, + priv->dmaStorageDMA ); } #ifdef CONFIG_PCI pci_release_regions(pdev); #endif - free_netdev(dev); - - pci_set_drvdata(pdev, NULL); -} - -static void tlan_start(struct net_device *dev) -{ - tlan_reset_lists(dev); - /* NOTE: It might not be necessary to read the stats before a - reset if you don't care what the values are. - */ - tlan_read_and_clear_stats(dev, TLAN_IGNORE); - tlan_reset_adapter(dev); - netif_wake_queue(dev); -} - -static void tlan_stop(struct net_device *dev) -{ - struct tlan_priv *priv = netdev_priv(dev); - - tlan_read_and_clear_stats(dev, TLAN_RECORD); - outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); - /* Reset and power down phy */ - tlan_reset_adapter(dev); - if (priv->timer.function != NULL) { - del_timer_sync(&priv->timer); - priv->timer.function = NULL; - } -} - -#ifdef CONFIG_PM - -static int tlan_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (netif_running(dev)) - tlan_stop(dev); - - netif_device_detach(dev); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); + free_netdev( dev ); - return 0; + pci_set_drvdata( pdev, NULL ); } -static int tlan_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_enable_wake(pdev, 0, 0); - netif_device_attach(dev); - - if (netif_running(dev)) - tlan_start(dev); - - return 0; -} - -#else /* CONFIG_PM */ - -#define tlan_suspend NULL -#define tlan_resume NULL - -#endif /* CONFIG_PM */ - - static struct pci_driver tlan_driver = { .name = "tlan", .id_table = tlan_pci_tbl, .probe = tlan_init_one, .remove = __devexit_p(tlan_remove_one), - .suspend = tlan_suspend, - .resume = tlan_resume, }; static int __init tlan_probe(void) @@ -556,13 +482,13 @@ static int __init tlan_probe(void) } TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); - tlan_eisa_probe(); + TLan_EisaProbe(); printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n", - tlan_devices_installed, tlan_devices_installed == 1 
? "" : "s", - tlan_have_pci, tlan_have_eisa); + TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s", + tlan_have_pci, tlan_have_eisa); - if (tlan_devices_installed == 0) { + if (TLanDevicesInstalled == 0) { rc = -ENODEV; goto err_out_pci_unreg; } @@ -575,39 +501,39 @@ static int __init tlan_probe(void) } -static int __devinit tlan_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __devinit tlan_init_one( struct pci_dev *pdev, + const struct pci_device_id *ent) { - return tlan_probe1(pdev, -1, -1, 0, ent); + return TLan_probe1( pdev, -1, -1, 0, ent); } /* -*************************************************************** -* tlan_probe1 -* -* Returns: -* 0 on success, error code on error -* Parms: -* none -* -* The name is lower case to fit in with all the rest of -* the netcard_probe names. This function looks for -* another TLan based adapter, setting it up with the -* allocated device struct if one is found. -* tlan_probe has been ported to the new net API and -* now allocates its own device structure. This function -* is also used by modules. -* -**************************************************************/ - -static int __devinit tlan_probe1(struct pci_dev *pdev, + *************************************************************** + * tlan_probe1 + * + * Returns: + * 0 on success, error code on error + * Parms: + * none + * + * The name is lower case to fit in with all the rest of + * the netcard_probe names. This function looks for + * another TLan based adapter, setting it up with the + * allocated device struct if one is found. + * tlan_probe has been ported to the new net API and + * now allocates its own device structure. This function + * is also used by modules. + * + **************************************************************/ + +static int __devinit TLan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev, - const struct pci_device_id *ent) + const struct pci_device_id *ent ) { struct net_device *dev; - struct tlan_priv *priv; + TLanPrivateInfo *priv; u16 device_id; int reg, rc = -ENODEV; @@ -617,7 +543,7 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, if (rc) return rc; - rc = pci_request_regions(pdev, tlan_signature); + rc = pci_request_regions(pdev, TLanSignature); if (rc) { printk(KERN_ERR "TLAN: Could not reserve IO regions\n"); goto err_out; @@ -625,7 +551,7 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, } #endif /* CONFIG_PCI */ - dev = alloc_etherdev(sizeof(struct tlan_priv)); + dev = alloc_etherdev(sizeof(TLanPrivateInfo)); if (dev == NULL) { printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); rc = -ENOMEM; @@ -635,28 +561,26 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, priv = netdev_priv(dev); - priv->pci_dev = pdev; + priv->pciDev = pdev; priv->dev = dev; /* Is this a PCI device? 
*/ if (pdev) { - u32 pci_io_base = 0; + u32 pci_io_base = 0; priv->adapter = &board_info[ent->driver_data]; rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - printk(KERN_ERR - "TLAN: No suitable PCI mapping available.\n"); + printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); goto err_out_free_dev; } - for (reg = 0; reg <= 5; reg++) { + for ( reg= 0; reg <= 5; reg ++ ) { if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { pci_io_base = pci_resource_start(pdev, reg); - TLAN_DBG(TLAN_DEBUG_GNRL, - "IO mapping is available at %x.\n", - pci_io_base); + TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n", + pci_io_base); break; } } @@ -668,7 +592,7 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, dev->base_addr = pci_io_base; dev->irq = pdev->irq; - priv->adapter_rev = pdev->revision; + priv->adapterRev = pdev->revision; pci_set_master(pdev); pci_set_drvdata(pdev, dev); @@ -678,11 +602,11 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, device_id = inw(ioaddr + EISA_ID2); priv->is_eisa = 1; if (device_id == 0x20F1) { - priv->adapter = &board_info[13]; /* NetFlex-3/E */ - priv->adapter_rev = 23; /* TLAN 2.3 */ + priv->adapter = &board_info[13]; /* NetFlex-3/E */ + priv->adapterRev = 23; /* TLAN 2.3 */ } else { priv->adapter = &board_info[14]; - priv->adapter_rev = 10; /* TLAN 1.0 */ + priv->adapterRev = 10; /* TLAN 1.0 */ } dev->base_addr = ioaddr; dev->irq = irq; @@ -696,11 +620,11 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 : (dev->mem_start & 0x18) >> 3; - if (priv->speed == 0x1) + if (priv->speed == 0x1) { priv->speed = TLAN_SPEED_10; - else if (priv->speed == 0x2) + } else if (priv->speed == 0x2) { priv->speed = TLAN_SPEED_100; - + } debug = priv->debug = dev->mem_end; } else { priv->aui = aui[boards_found]; @@ -711,11 +635,11 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, /* This will be used when we get an adapter error from * within our irq handler */ - INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work); + INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); spin_lock_init(&priv->lock); - rc = tlan_init(dev); + rc = TLan_Init(dev); if (rc) { printk(KERN_ERR "TLAN: Could not set up device.\n"); goto err_out_free_dev; @@ -728,29 +652,29 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, } - tlan_devices_installed++; + TLanDevicesInstalled++; boards_found++; /* pdev is NULL if this is an EISA device */ if (pdev) tlan_have_pci++; else { - priv->next_device = tlan_eisa_devices; - tlan_eisa_devices = dev; + priv->nextDevice = TLan_Eisa_Devices; + TLan_Eisa_Devices = dev; tlan_have_eisa++; } printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. 
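/*
 * Editor's aside, not part of the patch: alloc_etherdev() above allocates
 * the net_device and the driver-private area in a single block, so
 * netdev_priv() is just a pointer into that same allocation.  A minimal
 * sketch of the lifecycle (struct and function names hypothetical):
 */
struct example_priv {
	struct pci_dev *pdev;
};

static struct net_device *example_alloc(struct pci_dev *pdev)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!dev)
		return NULL;
	priv = netdev_priv(dev);	/* points into the same allocation */
	priv->pdev = pdev;
	return dev;			/* released later with free_netdev() */
}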
%d\n", - dev->name, - (int) dev->irq, - (int) dev->base_addr, - priv->adapter->device_label, - priv->adapter_rev); + dev->name, + (int) dev->irq, + (int) dev->base_addr, + priv->adapter->deviceLabel, + priv->adapterRev); return 0; err_out_uninit: - pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage, - priv->dma_storage_dma); + pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, + priv->dmaStorageDMA ); err_out_free_dev: free_netdev(dev); err_out_regions: @@ -765,23 +689,22 @@ static int __devinit tlan_probe1(struct pci_dev *pdev, } -static void tlan_eisa_cleanup(void) +static void TLan_Eisa_Cleanup(void) { struct net_device *dev; - struct tlan_priv *priv; + TLanPrivateInfo *priv; - while (tlan_have_eisa) { - dev = tlan_eisa_devices; + while( tlan_have_eisa ) { + dev = TLan_Eisa_Devices; priv = netdev_priv(dev); - if (priv->dma_storage) { - pci_free_consistent(priv->pci_dev, priv->dma_size, - priv->dma_storage, - priv->dma_storage_dma); + if (priv->dmaStorage) { + pci_free_consistent(priv->pciDev, priv->dmaSize, + priv->dmaStorage, priv->dmaStorageDMA ); } - release_region(dev->base_addr, 0x10); - unregister_netdev(dev); - tlan_eisa_devices = priv->next_device; - free_netdev(dev); + release_region( dev->base_addr, 0x10); + unregister_netdev( dev ); + TLan_Eisa_Devices = priv->nextDevice; + free_netdev( dev ); tlan_have_eisa--; } } @@ -792,7 +715,7 @@ static void __exit tlan_exit(void) pci_unregister_driver(&tlan_driver); if (tlan_have_eisa) - tlan_eisa_cleanup(); + TLan_Eisa_Cleanup(); } @@ -803,24 +726,24 @@ module_exit(tlan_exit); -/************************************************************** - * tlan_eisa_probe - * - * Returns: 0 on success, 1 otherwise - * - * Parms: None - * - * - * This functions probes for EISA devices and calls - * TLan_probe1 when one is found. - * - *************************************************************/ + /************************************************************** + * TLan_EisaProbe + * + * Returns: 0 on success, 1 otherwise + * + * Parms: None + * + * + * This functions probes for EISA devices and calls + * TLan_probe1 when one is found. 
+ * + *************************************************************/ -static void __init tlan_eisa_probe(void) +static void __init TLan_EisaProbe (void) { - long ioaddr; - int rc = -ENODEV; - int irq; + long ioaddr; + int rc = -ENODEV; + int irq; u16 device_id; if (!EISA_bus) { @@ -831,16 +754,15 @@ static void __init tlan_eisa_probe(void) /* Loop through all slots of the EISA bus */ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", - (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID)); - TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", - (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2)); + TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", + (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); + TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", + (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); - TLAN_DBG(TLAN_DEBUG_PROBE, - "Probing for EISA adapter at IO: 0x%4x : ", - (int) ioaddr); - if (request_region(ioaddr, 0x10, tlan_signature) == NULL) + TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", + (int) ioaddr); + if (request_region(ioaddr, 0x10, TLanSignature) == NULL) goto out; if (inw(ioaddr + EISA_ID) != 0x110E) { @@ -850,326 +772,326 @@ static void __init tlan_eisa_probe(void) device_id = inw(ioaddr + EISA_ID2); if (device_id != 0x20F1 && device_id != 0x40F1) { - release_region(ioaddr, 0x10); + release_region (ioaddr, 0x10); goto out; } - /* check if adapter is enabled */ - if (inb(ioaddr + EISA_CR) != 0x1) { - release_region(ioaddr, 0x10); + if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */ + release_region (ioaddr, 0x10); goto out2; } if (debug == 0x10) - printk(KERN_INFO "Found one\n"); + printk("Found one\n"); /* Get irq from board */ - switch (inb(ioaddr + 0xcc0)) { - case(0x10): - irq = 5; - break; - case(0x20): - irq = 9; - break; - case(0x40): - irq = 10; - break; - case(0x80): - irq = 11; - break; - default: - goto out; + switch (inb(ioaddr + 0xCC0)) { + case(0x10): + irq=5; + break; + case(0x20): + irq=9; + break; + case(0x40): + irq=10; + break; + case(0x80): + irq=11; + break; + default: + goto out; } /* Setup the newly found eisa adapter */ - rc = tlan_probe1(NULL, ioaddr, irq, - 12, NULL); + rc = TLan_probe1( NULL, ioaddr, irq, + 12, NULL); continue; -out: - if (debug == 0x10) - printk(KERN_INFO "None found\n"); - continue; + out: + if (debug == 0x10) + printk("None found\n"); + continue; -out2: - if (debug == 0x10) - printk(KERN_INFO "Card found but it is not enabled, skipping\n"); - continue; + out2: if (debug == 0x10) + printk("Card found but it is not enabled, skipping\n"); + continue; } -} +} /* TLan_EisaProbe */ #ifdef CONFIG_NET_POLL_CONTROLLER -static void tlan_poll(struct net_device *dev) +static void TLan_Poll(struct net_device *dev) { disable_irq(dev->irq); - tlan_handle_interrupt(dev->irq, dev); + TLan_HandleInterrupt(dev->irq, dev); enable_irq(dev->irq); } #endif -static const struct net_device_ops tlan_netdev_ops = { - .ndo_open = tlan_open, - .ndo_stop = tlan_close, - .ndo_start_xmit = tlan_start_tx, - .ndo_tx_timeout = tlan_tx_timeout, - .ndo_get_stats = tlan_get_stats, - .ndo_set_multicast_list = tlan_set_multicast_list, - .ndo_do_ioctl = tlan_ioctl, +static const struct net_device_ops TLan_netdev_ops = { + .ndo_open = TLan_Open, + .ndo_stop = TLan_Close, + .ndo_start_xmit = TLan_StartTx, + .ndo_tx_timeout = TLan_tx_timeout, + .ndo_get_stats = TLan_GetStats, + .ndo_set_multicast_list = TLan_SetMulticastList, + .ndo_do_ioctl = TLan_ioctl, .ndo_change_mtu = 
eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tlan_poll, + .ndo_poll_controller = TLan_Poll, #endif }; -/*************************************************************** - * tlan_init - * - * Returns: - * 0 on success, error code otherwise. - * Parms: - * dev The structure of the device to be - * init'ed. - * - * This function completes the initialization of the - * device structure and driver. It reserves the IO - * addresses, allocates memory for the lists and bounce - * buffers, retrieves the MAC address from the eeprom - * and assignes the device's methods. - * - **************************************************************/ - -static int tlan_init(struct net_device *dev) + /*************************************************************** + * TLan_Init + * + * Returns: + * 0 on success, error code otherwise. + * Parms: + * dev The structure of the device to be + * init'ed. + * + * This function completes the initialization of the + * device structure and driver. It reserves the IO + * addresses, allocates memory for the lists and bounce + * buffers, retrieves the MAC address from the eeprom + * and assignes the device's methods. + * + **************************************************************/ + +static int TLan_Init( struct net_device *dev ) { int dma_size; - int err; + int err; int i; - struct tlan_priv *priv; + TLanPrivateInfo *priv; priv = netdev_priv(dev); - dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS) - * (sizeof(struct tlan_list)); - priv->dma_storage = pci_alloc_consistent(priv->pci_dev, - dma_size, - &priv->dma_storage_dma); - priv->dma_size = dma_size; - - if (priv->dma_storage == NULL) { - printk(KERN_ERR - "TLAN: Could not allocate lists and buffers for %s.\n", - dev->name); + dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) + * ( sizeof(TLanList) ); + priv->dmaStorage = pci_alloc_consistent(priv->pciDev, + dma_size, &priv->dmaStorageDMA); + priv->dmaSize = dma_size; + + if ( priv->dmaStorage == NULL ) { + printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n", + dev->name ); return -ENOMEM; } - memset(priv->dma_storage, 0, dma_size); - priv->rx_list = (struct tlan_list *) - ALIGN((unsigned long)priv->dma_storage, 8); - priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8); - priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS; - priv->tx_list_dma = - priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS; + memset( priv->dmaStorage, 0, dma_size ); + priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8); + priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8); + priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; + priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; err = 0; - for (i = 0; i < 6 ; i++) - err |= tlan_ee_read_byte(dev, - (u8) priv->adapter->addr_ofs + i, - (u8 *) &dev->dev_addr[i]); - if (err) { + for ( i = 0; i < 6 ; i++ ) + err |= TLan_EeReadByte( dev, + (u8) priv->adapter->addrOfs + i, + (u8 *) &dev->dev_addr[i] ); + if ( err ) { printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n", - dev->name, - err); + dev->name, + err ); } dev->addr_len = 6; netif_carrier_off(dev); /* Device methods */ - dev->netdev_ops = &tlan_netdev_ops; + dev->netdev_ops = &TLan_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; return 0; -} +} /* TLan_Init */ -/*************************************************************** - * tlan_open - * - * Returns: - * 0 on success, 
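/*
 * Editor's aside, not part of the patch: the list ring set up in
 * TLan_Init() above is one coherent DMA block, with the RX lists aligned
 * to 8 bytes and the TX lists following them.  A minimal sketch of that
 * layout, assuming the driver's TLanList type (helper name hypothetical):
 */
static TLanList *example_alloc_rings(struct pci_dev *pdev, int nrx, int ntx,
				     dma_addr_t *rx_dma)
{
	dma_addr_t ring_dma;
	size_t size = (nrx + ntx) * sizeof(TLanList);
	void *ring = pci_alloc_consistent(pdev, size, &ring_dma);

	if (!ring)
		return NULL;
	memset(ring, 0, size);
	*rx_dma = ALIGN(ring_dma, 8);		/* bus address of the RX area */
	/* RX lists start at the aligned address; TX lists follow at +nrx */
	return (TLanList *) ALIGN((unsigned long) ring, 8);
}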
error code otherwise. - * Parms: - * dev Structure of device to be opened. - * - * This routine puts the driver and TLAN adapter in a - * state where it is ready to send and receive packets. - * It allocates the IRQ, resets and brings the adapter - * out of reset, and allows interrupts. It also delays - * the startup for autonegotiation or sends a Rx GO - * command to the adapter, as appropriate. - * - **************************************************************/ + /*************************************************************** + * TLan_Open + * + * Returns: + * 0 on success, error code otherwise. + * Parms: + * dev Structure of device to be opened. + * + * This routine puts the driver and TLAN adapter in a + * state where it is ready to send and receive packets. + * It allocates the IRQ, resets and brings the adapter + * out of reset, and allows interrupts. It also delays + * the startup for autonegotiation or sends a Rx GO + * command to the adapter, as appropriate. + * + **************************************************************/ -static int tlan_open(struct net_device *dev) +static int TLan_Open( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); int err; - priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION); - err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED, - dev->name, dev); + priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); + err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, + dev->name, dev ); - if (err) { + if ( err ) { pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n", - dev->name, dev->irq); + dev->name, dev->irq ); return err; } init_timer(&priv->timer); + netif_start_queue(dev); - tlan_start(dev); + /* NOTE: It might not be necessary to read the stats before a + reset if you don't care what the values are. + */ + TLan_ResetLists( dev ); + TLan_ReadAndClearStats( dev, TLAN_IGNORE ); + TLan_ResetAdapter( dev ); - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", - dev->name, priv->tlan_rev); + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", + dev->name, priv->tlanRev ); return 0; -} +} /* TLan_Open */ -/************************************************************** - * tlan_ioctl - * - * Returns: - * 0 on success, error code otherwise - * Params: - * dev structure of device to receive ioctl. - * - * rq ifreq structure to hold userspace data. - * - * cmd ioctl command. - * - * - *************************************************************/ + /************************************************************** + * TLan_ioctl + * + * Returns: + * 0 on success, error code otherwise + * Params: + * dev structure of device to receive ioctl. + * + * rq ifreq structure to hold userspace data. + * + * cmd ioctl command. + * + * + *************************************************************/ -static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(rq); - u32 phy = priv->phy[priv->phy_num]; + u32 phy = priv->phy[priv->phyNum]; - if (!priv->phy_online) + if (!priv->phyOnline) return -EAGAIN; - switch (cmd) { - case SIOCGMIIPHY: /* get address of MII PHY in use. */ - data->phy_id = phy; + switch(cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. 
*/ + data->phy_id = phy; - case SIOCGMIIREG: /* read MII PHY register. */ - tlan_mii_read_reg(dev, data->phy_id & 0x1f, - data->reg_num & 0x1f, &data->val_out); - return 0; + case SIOCGMIIREG: /* Read MII PHY register. */ + TLan_MiiReadReg(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, &data->val_out); + return 0; - case SIOCSMIIREG: /* write MII PHY register. */ - tlan_mii_write_reg(dev, data->phy_id & 0x1f, - data->reg_num & 0x1f, data->val_in); - return 0; - default: - return -EOPNOTSUPP; + case SIOCSMIIREG: /* Write MII PHY register. */ + TLan_MiiWriteReg(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); + return 0; + default: + return -EOPNOTSUPP; } -} +} /* tlan_ioctl */ -/*************************************************************** - * tlan_tx_timeout - * - * Returns: nothing - * - * Params: - * dev structure of device which timed out - * during transmit. - * - **************************************************************/ + /*************************************************************** + * TLan_tx_timeout + * + * Returns: nothing + * + * Params: + * dev structure of device which timed out + * during transmit. + * + **************************************************************/ -static void tlan_tx_timeout(struct net_device *dev) +static void TLan_tx_timeout(struct net_device *dev) { - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); /* Ok so we timed out, lets see what we can do about it...*/ - tlan_free_lists(dev); - tlan_reset_lists(dev); - tlan_read_and_clear_stats(dev, TLAN_IGNORE); - tlan_reset_adapter(dev); + TLan_FreeLists( dev ); + TLan_ResetLists( dev ); + TLan_ReadAndClearStats( dev, TLAN_IGNORE ); + TLan_ResetAdapter( dev ); dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); + netif_wake_queue( dev ); } -/*************************************************************** - * tlan_tx_timeout_work - * - * Returns: nothing - * - * Params: - * work work item of device which timed out - * - **************************************************************/ + /*************************************************************** + * TLan_tx_timeout_work + * + * Returns: nothing + * + * Params: + * work work item of device which timed out + * + **************************************************************/ -static void tlan_tx_timeout_work(struct work_struct *work) +static void TLan_tx_timeout_work(struct work_struct *work) { - struct tlan_priv *priv = - container_of(work, struct tlan_priv, tlan_tqueue); + TLanPrivateInfo *priv = + container_of(work, TLanPrivateInfo, tlan_tqueue); - tlan_tx_timeout(priv->dev); + TLan_tx_timeout(priv->dev); } -/*************************************************************** - * tlan_start_tx - * - * Returns: - * 0 on success, non-zero on failure. - * Parms: - * skb A pointer to the sk_buff containing the - * frame to be sent. - * dev The device to send the data on. - * - * This function adds a frame to the Tx list to be sent - * ASAP. First it verifies that the adapter is ready and - * there is room in the queue. Then it sets up the next - * available list, copies the frame to the corresponding - * buffer. If the adapter Tx channel is idle, it gives - * the adapter a Tx Go command on the list, otherwise it - * sets the forward address of the previous list to point - * to this one. Then it frees the sk_buff. 
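/*
 * Editor's aside, not part of the patch: hypothetical userspace use of
 * the MII ioctls handled by TLan_ioctl above (note that the SIOCGMIIPHY
 * case falls through into SIOCGMIIREG).  Kept out of the kernel build;
 * error handling omitted.
 */
#if 0	/* userspace sketch */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/mii.h>

static int example_read_bmsr(int sock, const char *ifname)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ioctl(sock, SIOCGMIIPHY, &ifr);		/* fills mii->phy_id */
	mii->reg_num = MII_BMSR;
	ioctl(sock, SIOCGMIIREG, &ifr);		/* reads the status register */
	return mii->val_out;
}
#endif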
- * - **************************************************************/ - -static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev) + /*************************************************************** + * TLan_StartTx + * + * Returns: + * 0 on success, non-zero on failure. + * Parms: + * skb A pointer to the sk_buff containing the + * frame to be sent. + * dev The device to send the data on. + * + * This function adds a frame to the Tx list to be sent + * ASAP. First it verifies that the adapter is ready and + * there is room in the queue. Then it sets up the next + * available list, copies the frame to the corresponding + * buffer. If the adapter Tx channel is idle, it gives + * the adapter a Tx Go command on the list, otherwise it + * sets the forward address of the previous list to point + * to this one. Then it frees the sk_buff. + * + **************************************************************/ + +static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); dma_addr_t tail_list_phys; - struct tlan_list *tail_list; + TLanList *tail_list; unsigned long flags; unsigned int txlen; - if (!priv->phy_online) { - TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", - dev->name); + if ( ! priv->phyOnline ) { + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", + dev->name ); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1178,214 +1100,218 @@ static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); - tail_list = priv->tx_list + priv->tx_tail; - tail_list_phys = - priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail; + tail_list = priv->txList + priv->txTail; + tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; - if (tail_list->c_stat != TLAN_CSTAT_UNUSED) { - TLAN_DBG(TLAN_DEBUG_TX, - "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", - dev->name, priv->tx_head, priv->tx_tail); + if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { + TLAN_DBG( TLAN_DEBUG_TX, + "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", + dev->name, priv->txHead, priv->txTail ); netif_stop_queue(dev); - priv->tx_busy_count++; + priv->txBusyCount++; return NETDEV_TX_BUSY; } tail_list->forward = 0; - tail_list->buffer[0].address = pci_map_single(priv->pci_dev, + tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, txlen, PCI_DMA_TODEVICE); - tlan_store_skb(tail_list, skb); + TLan_StoreSKB(tail_list, skb); - tail_list->frame_size = (u16) txlen; + tail_list->frameSize = (u16) txlen; tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; tail_list->buffer[1].count = 0; tail_list->buffer[1].address = 0; spin_lock_irqsave(&priv->lock, flags); - tail_list->c_stat = TLAN_CSTAT_READY; - if (!priv->tx_in_progress) { - priv->tx_in_progress = 1; - TLAN_DBG(TLAN_DEBUG_TX, - "TRANSMIT: Starting TX on buffer %d\n", - priv->tx_tail); - outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM); - outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD); + tail_list->cStat = TLAN_CSTAT_READY; + if ( ! 
priv->txInProgress ) { + priv->txInProgress = 1; + TLAN_DBG( TLAN_DEBUG_TX, + "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); + outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); + outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); } else { - TLAN_DBG(TLAN_DEBUG_TX, - "TRANSMIT: Adding buffer %d to TX channel\n", - priv->tx_tail); - if (priv->tx_tail == 0) { - (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", + priv->txTail ); + if ( priv->txTail == 0 ) { + ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = tail_list_phys; } else { - (priv->tx_list + (priv->tx_tail - 1))->forward + ( priv->txList + ( priv->txTail - 1 ) )->forward = tail_list_phys; } } spin_unlock_irqrestore(&priv->lock, flags); - CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS); + CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); return NETDEV_TX_OK; -} +} /* TLan_StartTx */ -/*************************************************************** - * tlan_handle_interrupt - * - * Returns: - * Nothing - * Parms: - * irq The line on which the interrupt - * occurred. - * dev_id A pointer to the device assigned to - * this irq line. - * - * This function handles an interrupt generated by its - * assigned TLAN adapter. The function deactivates - * interrupts on its adapter, records the type of - * interrupt, executes the appropriate subhandler, and - * acknowdges the interrupt to the adapter (thus - * re-enabling adapter interrupts. - * - **************************************************************/ + /*************************************************************** + * TLan_HandleInterrupt + * + * Returns: + * Nothing + * Parms: + * irq The line on which the interrupt + * occurred. + * dev_id A pointer to the device assigned to + * this irq line. + * + * This function handles an interrupt generated by its + * assigned TLAN adapter. The function deactivates + * interrupts on its adapter, records the type of + * interrupt, executes the appropriate subhandler, and + * acknowdges the interrupt to the adapter (thus + * re-enabling adapter interrupts. + * + **************************************************************/ -static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id) +static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 host_int; u16 type; spin_lock(&priv->lock); - host_int = inw(dev->base_addr + TLAN_HOST_INT); - type = (host_int & TLAN_HI_IT_MASK) >> 2; - if (type) { + host_int = inw( dev->base_addr + TLAN_HOST_INT ); + type = ( host_int & TLAN_HI_IT_MASK ) >> 2; + if ( type ) { u32 ack; u32 host_cmd; - outw(host_int, dev->base_addr + TLAN_HOST_INT); - ack = tlan_int_vector[type](dev, host_int); + outw( host_int, dev->base_addr + TLAN_HOST_INT ); + ack = TLanIntVector[type]( dev, host_int ); - if (ack) { - host_cmd = TLAN_HC_ACK | ack | (type << 18); - outl(host_cmd, dev->base_addr + TLAN_HOST_CMD); + if ( ack ) { + host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); + outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); } } spin_unlock(&priv->lock); return IRQ_RETVAL(type); -} +} /* TLan_HandleInterrupts */ -/*************************************************************** - * tlan_close - * - * Returns: - * An error code. - * Parms: - * dev The device structure of the device to - * close. - * - * This function shuts down the adapter. 
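/*
 * Editor's aside, not part of the patch: the TX path above advances its
 * ring indices with CIRC_INC() from tlan.h, which is roughly equivalent
 * to the open-coded wrap below (helper name hypothetical).
 */
static inline void example_circ_inc(int *idx, int ring_size)
{
	if (++(*idx) >= ring_size)
		*idx = 0;
}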
It records any - * stats, puts the adapter into reset state, deactivates - * its time as needed, and frees the irq it is using. - * - **************************************************************/ + /*************************************************************** + * TLan_Close + * + * Returns: + * An error code. + * Parms: + * dev The device structure of the device to + * close. + * + * This function shuts down the adapter. It records any + * stats, puts the adapter into reset state, deactivates + * its time as needed, and frees the irq it is using. + * + **************************************************************/ -static int tlan_close(struct net_device *dev) +static int TLan_Close(struct net_device *dev) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); + netif_stop_queue(dev); priv->neg_be_verbose = 0; - tlan_stop(dev); - free_irq(dev->irq, dev); - tlan_free_lists(dev); - TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name); + TLan_ReadAndClearStats( dev, TLAN_RECORD ); + outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); + if ( priv->timer.function != NULL ) { + del_timer_sync( &priv->timer ); + priv->timer.function = NULL; + } + + free_irq( dev->irq, dev ); + TLan_FreeLists( dev ); + TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name ); return 0; -} +} /* TLan_Close */ -/*************************************************************** - * tlan_get_stats - * - * Returns: - * A pointer to the device's statistics structure. - * Parms: - * dev The device structure to return the - * stats for. - * - * This function updates the devices statistics by reading - * the TLAN chip's onboard registers. Then it returns the - * address of the statistics structure. - * - **************************************************************/ + /*************************************************************** + * TLan_GetStats + * + * Returns: + * A pointer to the device's statistics structure. + * Parms: + * dev The device structure to return the + * stats for. + * + * This function updates the devices statistics by reading + * the TLAN chip's onboard registers. Then it returns the + * address of the statistics structure. + * + **************************************************************/ -static struct net_device_stats *tlan_get_stats(struct net_device *dev) +static struct net_device_stats *TLan_GetStats( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); int i; /* Should only read stats if open ? 
*/ - tlan_read_and_clear_stats(dev, TLAN_RECORD); + TLan_ReadAndClearStats( dev, TLAN_RECORD ); - TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, - priv->rx_eoc_count); - TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, - priv->tx_busy_count); - if (debug & TLAN_DEBUG_GNRL) { - tlan_print_dio(dev->base_addr); - tlan_phy_print(dev); + TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, + priv->rxEocCount ); + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, + priv->txBusyCount ); + if ( debug & TLAN_DEBUG_GNRL ) { + TLan_PrintDio( dev->base_addr ); + TLan_PhyPrint( dev ); } - if (debug & TLAN_DEBUG_LIST) { - for (i = 0; i < TLAN_NUM_RX_LISTS; i++) - tlan_print_list(priv->rx_list + i, "RX", i); - for (i = 0; i < TLAN_NUM_TX_LISTS; i++) - tlan_print_list(priv->tx_list + i, "TX", i); + if ( debug & TLAN_DEBUG_LIST ) { + for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) + TLan_PrintList( priv->rxList + i, "RX", i ); + for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) + TLan_PrintList( priv->txList + i, "TX", i ); } return &dev->stats; -} +} /* TLan_GetStats */ -/*************************************************************** - * tlan_set_multicast_list - * - * Returns: - * Nothing - * Parms: - * dev The device structure to set the - * multicast list for. - * - * This function sets the TLAN adaptor to various receive - * modes. If the IFF_PROMISC flag is set, promiscuous - * mode is acitviated. Otherwise, promiscuous mode is - * turned off. If the IFF_ALLMULTI flag is set, then - * the hash table is set to receive all group addresses. - * Otherwise, the first three multicast addresses are - * stored in AREG_1-3, and the rest are selected via the - * hash table, as necessary. - * - **************************************************************/ + /*************************************************************** + * TLan_SetMulticastList + * + * Returns: + * Nothing + * Parms: + * dev The device structure to set the + * multicast list for. + * + * This function sets the TLAN adaptor to various receive + * modes. If the IFF_PROMISC flag is set, promiscuous + * mode is acitviated. Otherwise, promiscuous mode is + * turned off. If the IFF_ALLMULTI flag is set, then + * the hash table is set to receive all group addresses. + * Otherwise, the first three multicast addresses are + * stored in AREG_1-3, and the rest are selected via the + * hash table, as necessary. 
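/*
 * Editor's aside, not part of the patch: the multicast setup below maps a
 * 0..63 hash offset onto the two 32-bit HASH registers.  The bit placement
 * it uses is simply the following (helper name hypothetical):
 */
static inline void example_set_hash_bit(u32 *hash1, u32 *hash2, u32 offset)
{
	if (offset < 32)
		*hash1 |= 1u << offset;
	else
		*hash2 |= 1u << (offset - 32);
}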
+ * + **************************************************************/ -static void tlan_set_multicast_list(struct net_device *dev) +static void TLan_SetMulticastList( struct net_device *dev ) { struct netdev_hw_addr *ha; u32 hash1 = 0; @@ -1394,56 +1320,53 @@ static void tlan_set_multicast_list(struct net_device *dev) u32 offset; u8 tmp; - if (dev->flags & IFF_PROMISC) { - tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); - tlan_dio_write8(dev->base_addr, - TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF); + if ( dev->flags & IFF_PROMISC ) { + tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); + TLan_DioWrite8( dev->base_addr, + TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); } else { - tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); - tlan_dio_write8(dev->base_addr, - TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF); - if (dev->flags & IFF_ALLMULTI) { - for (i = 0; i < 3; i++) - tlan_set_mac(dev, i + 1, NULL); - tlan_dio_write32(dev->base_addr, TLAN_HASH_1, - 0xffffffff); - tlan_dio_write32(dev->base_addr, TLAN_HASH_2, - 0xffffffff); + tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); + TLan_DioWrite8( dev->base_addr, + TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); + if ( dev->flags & IFF_ALLMULTI ) { + for ( i = 0; i < 3; i++ ) + TLan_SetMac( dev, i + 1, NULL ); + TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF ); + TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); } else { i = 0; netdev_for_each_mc_addr(ha, dev) { - if (i < 3) { - tlan_set_mac(dev, i + 1, + if ( i < 3 ) { + TLan_SetMac( dev, i + 1, (char *) &ha->addr); } else { - offset = - tlan_hash_func((u8 *)&ha->addr); - if (offset < 32) - hash1 |= (1 << offset); + offset = TLan_HashFunc((u8 *)&ha->addr); + if ( offset < 32 ) + hash1 |= ( 1 << offset ); else - hash2 |= (1 << (offset - 32)); + hash2 |= ( 1 << ( offset - 32 ) ); } i++; } - for ( ; i < 3; i++) - tlan_set_mac(dev, i + 1, NULL); - tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1); - tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2); + for ( ; i < 3; i++ ) + TLan_SetMac( dev, i + 1, NULL ); + TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 ); + TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 ); } } -} +} /* TLan_SetMulticastList */ /***************************************************************************** ****************************************************************************** -ThunderLAN driver interrupt vectors and table + ThunderLAN Driver Interrupt Vectors and Table -please see chap. 4, "Interrupt Handling" of the "ThunderLAN -Programmer's Guide" for more informations on handling interrupts -generated by TLAN based adapters. + Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN + Programmer's Guide" for more informations on handling interrupts + generated by TLAN based adapters. ****************************************************************************** *****************************************************************************/ @@ -1451,48 +1374,46 @@ generated by TLAN based adapters. -/*************************************************************** - * tlan_handle_tx_eof - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles Tx EOF interrupts which are raised - * by the adapter when it has completed sending the - * contents of a buffer. If detemines which list/buffer - * was completed and resets it. 
If the buffer was the last - * in the channel (EOC), then the function checks to see if - * another buffer is ready to send, and if so, sends a Tx - * Go command. Finally, the driver activates/continues the - * activity LED. - * - **************************************************************/ - -static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int) + /*************************************************************** + * TLan_HandleTxEOF + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles Tx EOF interrupts which are raised + * by the adapter when it has completed sending the + * contents of a buffer. If detemines which list/buffer + * was completed and resets it. If the buffer was the last + * in the channel (EOC), then the function checks to see if + * another buffer is ready to send, and if so, sends a Tx + * Go command. Finally, the driver activates/continues the + * activity LED. + * + **************************************************************/ + +static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); int eoc = 0; - struct tlan_list *head_list; + TLanList *head_list; dma_addr_t head_list_phys; u32 ack = 0; - u16 tmp_c_stat; + u16 tmpCStat; - TLAN_DBG(TLAN_DEBUG_TX, - "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", - priv->tx_head, priv->tx_tail); - head_list = priv->tx_list + priv->tx_head; + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", + priv->txHead, priv->txTail ); + head_list = priv->txList + priv->txHead; - while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) - && (ack < 255)) { - struct sk_buff *skb = tlan_get_skb(head_list); + while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { + struct sk_buff *skb = TLan_GetSKB(head_list); ack++; - pci_unmap_single(priv->pci_dev, head_list->buffer[0].address, + pci_unmap_single(priv->pciDev, head_list->buffer[0].address, max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE), PCI_DMA_TODEVICE); @@ -1500,311 +1421,304 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int) head_list->buffer[8].address = 0; head_list->buffer[9].address = 0; - if (tmp_c_stat & TLAN_CSTAT_EOC) + if ( tmpCStat & TLAN_CSTAT_EOC ) eoc = 1; - dev->stats.tx_bytes += head_list->frame_size; + dev->stats.tx_bytes += head_list->frameSize; - head_list->c_stat = TLAN_CSTAT_UNUSED; + head_list->cStat = TLAN_CSTAT_UNUSED; netif_start_queue(dev); - CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS); - head_list = priv->tx_list + priv->tx_head; + CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS ); + head_list = priv->txList + priv->txHead; } if (!ack) - printk(KERN_INFO - "TLAN: Received interrupt for uncompleted TX frame.\n"); - - if (eoc) { - TLAN_DBG(TLAN_DEBUG_TX, - "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n", - priv->tx_head, priv->tx_tail); - head_list = priv->tx_list + priv->tx_head; - head_list_phys = priv->tx_list_dma - + sizeof(struct tlan_list)*priv->tx_head; - if (head_list->c_stat & TLAN_CSTAT_READY) { - outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); + + if ( eoc ) { + TLAN_DBG( TLAN_DEBUG_TX, + "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", + priv->txHead, priv->txTail ); + head_list = priv->txList + priv->txHead; + head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; + 
if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO; } else { - priv->tx_in_progress = 0; + priv->txInProgress = 0; } } - if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { - tlan_dio_write8(dev->base_addr, - TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); - if (priv->timer.function == NULL) { - priv->timer.function = tlan_timer; - priv->timer.data = (unsigned long) dev; - priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; - priv->timer_set_at = jiffies; - priv->timer_type = TLAN_TIMER_ACTIVITY; - add_timer(&priv->timer); - } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { - priv->timer_set_at = jiffies; + if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { + TLan_DioWrite8( dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); + if ( priv->timer.function == NULL ) { + priv->timer.function = TLan_Timer; + priv->timer.data = (unsigned long) dev; + priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; + priv->timerSetAt = jiffies; + priv->timerType = TLAN_TIMER_ACTIVITY; + add_timer(&priv->timer); + } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { + priv->timerSetAt = jiffies; } } return ack; -} +} /* TLan_HandleTxEOF */ -/*************************************************************** - * TLan_HandleStatOverflow - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles the Statistics Overflow interrupt - * which means that one or more of the TLAN statistics - * registers has reached 1/2 capacity and needs to be read. - * - **************************************************************/ + /*************************************************************** + * TLan_HandleStatOverflow + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Statistics Overflow interrupt + * which means that one or more of the TLAN statistics + * registers has reached 1/2 capacity and needs to be read. + * + **************************************************************/ -static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int) +static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) { - tlan_read_and_clear_stats(dev, TLAN_RECORD); + TLan_ReadAndClearStats( dev, TLAN_RECORD ); return 1; -} - - - - -/*************************************************************** - * TLan_HandleRxEOF - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles the Rx EOF interrupt which - * indicates a frame has been received by the adapter from - * the net and the frame has been transferred to memory. - * The function determines the bounce buffer the frame has - * been loaded into, creates a new sk_buff big enough to - * hold the frame, and sends it to protocol stack. It - * then resets the used buffer and appends it to the end - * of the list. If the frame was the last in the Rx - * channel (EOC), the function restarts the receive channel - * by sending an Rx Go command to the adapter. Then it - * activates/continues the activity LED. 
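/*
 * Editor's aside, not part of the patch: the TX EOF path above and the RX
 * EOF path below arm the activity-LED timer the same way -- only when no
 * timer callback is pending -- and otherwise just refresh the timestamp.
 * That repeated guard, condensed into a helper (name hypothetical, fields
 * as used by this driver):
 */
static void example_arm_activity_led(struct net_device *dev,
				     TLanPrivateInfo *priv)
{
	if (priv->timer.function == NULL) {
		priv->timer.function = TLan_Timer;
		priv->timer.data = (unsigned long) dev;
		priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
		priv->timerSetAt = jiffies;
		priv->timerType = TLAN_TIMER_ACTIVITY;
		add_timer(&priv->timer);
	} else if (priv->timerType == TLAN_TIMER_ACTIVITY) {
		priv->timerSetAt = jiffies;	/* extend the current window */
	}
}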
- * - **************************************************************/ - -static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int) +} /* TLan_HandleStatOverflow */ + + + + + /*************************************************************** + * TLan_HandleRxEOF + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Rx EOF interrupt which + * indicates a frame has been received by the adapter from + * the net and the frame has been transferred to memory. + * The function determines the bounce buffer the frame has + * been loaded into, creates a new sk_buff big enough to + * hold the frame, and sends it to protocol stack. It + * then resets the used buffer and appends it to the end + * of the list. If the frame was the last in the Rx + * channel (EOC), the function restarts the receive channel + * by sending an Rx Go command to the adapter. Then it + * activates/continues the activity LED. + * + **************************************************************/ + +static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u32 ack = 0; int eoc = 0; - struct tlan_list *head_list; + TLanList *head_list; struct sk_buff *skb; - struct tlan_list *tail_list; - u16 tmp_c_stat; + TLanList *tail_list; + u16 tmpCStat; dma_addr_t head_list_phys; - TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n", - priv->rx_head, priv->rx_tail); - head_list = priv->rx_list + priv->rx_head; - head_list_phys = - priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head; + TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", + priv->rxHead, priv->rxTail ); + head_list = priv->rxList + priv->rxHead; + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; - while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) - && (ack < 255)) { - dma_addr_t frame_dma = head_list->buffer[0].address; - u32 frame_size = head_list->frame_size; + while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { + dma_addr_t frameDma = head_list->buffer[0].address; + u32 frameSize = head_list->frameSize; struct sk_buff *new_skb; ack++; - if (tmp_c_stat & TLAN_CSTAT_EOC) + if (tmpCStat & TLAN_CSTAT_EOC) eoc = 1; new_skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if (!new_skb) + if ( !new_skb ) goto drop_and_reuse; - skb = tlan_get_skb(head_list); - pci_unmap_single(priv->pci_dev, frame_dma, + skb = TLan_GetSKB(head_list); + pci_unmap_single(priv->pciDev, frameDma, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); - skb_put(skb, frame_size); + skb_put( skb, frameSize ); - dev->stats.rx_bytes += frame_size; + dev->stats.rx_bytes += frameSize; - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); + skb->protocol = eth_type_trans( skb, dev ); + netif_rx( skb ); - head_list->buffer[0].address = - pci_map_single(priv->pci_dev, new_skb->data, - TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); + head_list->buffer[0].address = pci_map_single(priv->pciDev, + new_skb->data, + TLAN_MAX_FRAME_SIZE, + PCI_DMA_FROMDEVICE); - tlan_store_skb(head_list, new_skb); + TLan_StoreSKB(head_list, new_skb); drop_and_reuse: head_list->forward = 0; - head_list->c_stat = 0; - tail_list = priv->rx_list + priv->rx_tail; + head_list->cStat = 0; + tail_list = priv->rxList + priv->rxTail; tail_list->forward = head_list_phys; - CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS); - 
CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS); - head_list = priv->rx_list + priv->rx_head; - head_list_phys = priv->rx_list_dma - + sizeof(struct tlan_list)*priv->rx_head; + CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS ); + CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS ); + head_list = priv->rxList + priv->rxHead; + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; } if (!ack) - printk(KERN_INFO - "TLAN: Received interrupt for uncompleted RX frame.\n"); - - - if (eoc) { - TLAN_DBG(TLAN_DEBUG_RX, - "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n", - priv->rx_head, priv->rx_tail); - head_list = priv->rx_list + priv->rx_head; - head_list_phys = priv->rx_list_dma - + sizeof(struct tlan_list)*priv->rx_head; - outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); + + + if ( eoc ) { + TLAN_DBG( TLAN_DEBUG_RX, + "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", + priv->rxHead, priv->rxTail ); + head_list = priv->rxList + priv->rxHead; + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO | TLAN_HC_RT; - priv->rx_eoc_count++; + priv->rxEocCount++; } - if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { - tlan_dio_write8(dev->base_addr, - TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); - if (priv->timer.function == NULL) { - priv->timer.function = tlan_timer; + if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { + TLan_DioWrite8( dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); + if ( priv->timer.function == NULL ) { + priv->timer.function = TLan_Timer; priv->timer.data = (unsigned long) dev; priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; - priv->timer_set_at = jiffies; - priv->timer_type = TLAN_TIMER_ACTIVITY; + priv->timerSetAt = jiffies; + priv->timerType = TLAN_TIMER_ACTIVITY; add_timer(&priv->timer); - } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { - priv->timer_set_at = jiffies; + } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { + priv->timerSetAt = jiffies; } } return ack; -} +} /* TLan_HandleRxEOF */ -/*************************************************************** - * tlan_handle_dummy - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles the Dummy interrupt, which is - * raised whenever a test interrupt is generated by setting - * the Req_Int bit of HOST_CMD to 1. - * - **************************************************************/ + /*************************************************************** + * TLan_HandleDummy + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Dummy interrupt, which is + * raised whenever a test interrupt is generated by setting + * the Req_Int bit of HOST_CMD to 1. + * + **************************************************************/ -static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int) +static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) { - pr_info("TLAN: Test interrupt on %s.\n", dev->name); + printk( "TLAN: Test interrupt on %s.\n", dev->name ); return 1; -} +} /* TLan_HandleDummy */ -/*************************************************************** - * tlan_handle_tx_eoc - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. 
- * host_int The contents of the HOST_INT - * port. - * - * This driver is structured to determine EOC occurrences by - * reading the CSTAT member of the list structure. Tx EOC - * interrupts are disabled via the DIO INTDIS register. - * However, TLAN chips before revision 3.0 didn't have this - * functionality, so process EOC events if this is the - * case. - * - **************************************************************/ + /*************************************************************** + * TLan_HandleTxEOC + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This driver is structured to determine EOC occurrences by + * reading the CSTAT member of the list structure. Tx EOC + * interrupts are disabled via the DIO INTDIS register. + * However, TLAN chips before revision 3.0 didn't have this + * functionality, so process EOC events if this is the + * case. + * + **************************************************************/ -static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int) +static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) { - struct tlan_priv *priv = netdev_priv(dev); - struct tlan_list *head_list; + TLanPrivateInfo *priv = netdev_priv(dev); + TLanList *head_list; dma_addr_t head_list_phys; u32 ack = 1; host_int = 0; - if (priv->tlan_rev < 0x30) { - TLAN_DBG(TLAN_DEBUG_TX, - "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n", - priv->tx_head, priv->tx_tail); - head_list = priv->tx_list + priv->tx_head; - head_list_phys = priv->tx_list_dma - + sizeof(struct tlan_list)*priv->tx_head; - if (head_list->c_stat & TLAN_CSTAT_READY) { + if ( priv->tlanRev < 0x30 ) { + TLAN_DBG( TLAN_DEBUG_TX, + "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", + priv->txHead, priv->txTail ); + head_list = priv->txList + priv->txHead; + head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; + if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { netif_stop_queue(dev); - outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO; } else { - priv->tx_in_progress = 0; + priv->txInProgress = 0; } } return ack; -} +} /* TLan_HandleTxEOC */ -/*************************************************************** - * tlan_handle_status_check - * - * Returns: - * 0 if Adapter check, 1 if Network Status check. - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles Adapter Check/Network Status - * interrupts generated by the adapter. It checks the - * vector in the HOST_INT register to determine if it is - * an Adapter Check interrupt. If so, it resets the - * adapter. Otherwise it clears the status registers - * and services the PHY. - * - **************************************************************/ + /*************************************************************** + * TLan_HandleStatusCheck + * + * Returns: + * 0 if Adapter check, 1 if Network Status check. + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles Adapter Check/Network Status + * interrupts generated by the adapter. It checks the + * vector in the HOST_INT register to determine if it is + * an Adapter Check interrupt. If so, it resets the + * adapter. Otherwise it clears the status registers + * and services the PHY. 
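/*
 * Editor's aside, not part of the patch: the network-status branch of
 * TLan_HandleStatusCheck below keeps the PHY's polarity-swap bit in step
 * with the polarity-OK status bit.  The read-modify-write it performs,
 * condensed (helper name hypothetical):
 */
static void example_fix_polarity(struct net_device *dev, u32 phy)
{
	u16 sts, ctl;

	TLan_MiiReadReg(dev, phy, TLAN_TLPHY_STS, &sts);
	TLan_MiiReadReg(dev, phy, TLAN_TLPHY_CTL, &ctl);
	if (!(sts & TLAN_TS_POLOK) && !(ctl & TLAN_TC_SWAPOL))
		TLan_MiiWriteReg(dev, phy, TLAN_TLPHY_CTL, ctl | TLAN_TC_SWAPOL);
	else if ((sts & TLAN_TS_POLOK) && (ctl & TLAN_TC_SWAPOL))
		TLan_MiiWriteReg(dev, phy, TLAN_TLPHY_CTL, ctl & ~TLAN_TC_SWAPOL);
}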
+ * + **************************************************************/ -static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int) +static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u32 ack; u32 error; u8 net_sts; @@ -1813,94 +1727,92 @@ static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int) u16 tlphy_sts; ack = 1; - if (host_int & TLAN_HI_IV_MASK) { - netif_stop_queue(dev); - error = inl(dev->base_addr + TLAN_CH_PARM); - pr_info("TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error); - tlan_read_and_clear_stats(dev, TLAN_RECORD); - outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); + if ( host_int & TLAN_HI_IV_MASK ) { + netif_stop_queue( dev ); + error = inl( dev->base_addr + TLAN_CH_PARM ); + printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error ); + TLan_ReadAndClearStats( dev, TLAN_RECORD ); + outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); schedule_work(&priv->tlan_tqueue); netif_wake_queue(dev); ack = 0; } else { - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name); - phy = priv->phy[priv->phy_num]; - - net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS); - if (net_sts) { - tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts); - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", - dev->name, (unsigned) net_sts); + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name ); + phy = priv->phy[priv->phyNum]; + + net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); + if ( net_sts ) { + TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", + dev->name, (unsigned) net_sts ); } - if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) { - tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts); - tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); - if (!(tlphy_sts & TLAN_TS_POLOK) && - !(tlphy_ctl & TLAN_TC_SWAPOL)) { - tlphy_ctl |= TLAN_TC_SWAPOL; - tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, - tlphy_ctl); - } else if ((tlphy_sts & TLAN_TS_POLOK) && - (tlphy_ctl & TLAN_TC_SWAPOL)) { - tlphy_ctl &= ~TLAN_TC_SWAPOL; - tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, - tlphy_ctl); + if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { + TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); + TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); + if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && + ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { + tlphy_ctl |= TLAN_TC_SWAPOL; + TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); + } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && + ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { + tlphy_ctl &= ~TLAN_TC_SWAPOL; + TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); + } + + if (debug) { + TLan_PhyPrint( dev ); } - - if (debug) - tlan_phy_print(dev); } } return ack; -} +} /* TLan_HandleStatusCheck */ -/*************************************************************** - * tlan_handle_rx_eoc - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This driver is structured to determine EOC occurrences by - * reading the CSTAT member of the list structure. Rx EOC - * interrupts are disabled via the DIO INTDIS register. - * However, TLAN chips before revision 3.0 didn't have this - * CSTAT member or a INTDIS register, so if this chip is - * pre-3.0, process EOC interrupts normally. 
- * - **************************************************************/ + /*************************************************************** + * TLan_HandleRxEOC + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This driver is structured to determine EOC occurrences by + * reading the CSTAT member of the list structure. Rx EOC + * interrupts are disabled via the DIO INTDIS register. + * However, TLAN chips before revision 3.0 didn't have this + * CSTAT member or a INTDIS register, so if this chip is + * pre-3.0, process EOC interrupts normally. + * + **************************************************************/ -static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int) +static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); dma_addr_t head_list_phys; u32 ack = 1; - if (priv->tlan_rev < 0x30) { - TLAN_DBG(TLAN_DEBUG_RX, - "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n", - priv->rx_head, priv->rx_tail); - head_list_phys = priv->rx_list_dma - + sizeof(struct tlan_list)*priv->rx_head; - outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + if ( priv->tlanRev < 0x30 ) { + TLAN_DBG( TLAN_DEBUG_RX, + "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", + priv->rxHead, priv->rxTail ); + head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; + outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); ack |= TLAN_HC_GO | TLAN_HC_RT; - priv->rx_eoc_count++; + priv->rxEocCount++; } return ack; -} +} /* TLan_HandleRxEOC */ @@ -1908,98 +1820,98 @@ static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int) /***************************************************************************** ****************************************************************************** -ThunderLAN driver timer function + ThunderLAN Driver Timer Function ****************************************************************************** *****************************************************************************/ -/*************************************************************** - * tlan_timer - * - * Returns: - * Nothing - * Parms: - * data A value given to add timer when - * add_timer was called. - * - * This function handles timed functionality for the - * TLAN driver. The two current timer uses are for - * delaying for autonegotionation and driving the ACT LED. - * - Autonegotiation requires being allowed about - * 2 1/2 seconds before attempting to transmit a - * packet. It would be a very bad thing to hang - * the kernel this long, so the driver doesn't - * allow transmission 'til after this time, for - * certain PHYs. It would be much nicer if all - * PHYs were interrupt-capable like the internal - * PHY. - * - The ACT LED, which shows adapter activity, is - * driven by the driver, and so must be left on - * for a short period to power up the LED so it - * can be seen. This delay can be changed by - * changing the TLAN_TIMER_ACT_DELAY in tlan.h, - * if desired. 100 ms produces a slightly - * sluggish response. - * - **************************************************************/ - -static void tlan_timer(unsigned long data) + /*************************************************************** + * TLan_Timer + * + * Returns: + * Nothing + * Parms: + * data A value given to add timer when + * add_timer was called. + * + * This function handles timed functionality for the + * TLAN driver. 
The two current timer uses are for + * delaying for autonegotionation and driving the ACT LED. + * - Autonegotiation requires being allowed about + * 2 1/2 seconds before attempting to transmit a + * packet. It would be a very bad thing to hang + * the kernel this long, so the driver doesn't + * allow transmission 'til after this time, for + * certain PHYs. It would be much nicer if all + * PHYs were interrupt-capable like the internal + * PHY. + * - The ACT LED, which shows adapter activity, is + * driven by the driver, and so must be left on + * for a short period to power up the LED so it + * can be seen. This delay can be changed by + * changing the TLAN_TIMER_ACT_DELAY in tlan.h, + * if desired. 100 ms produces a slightly + * sluggish response. + * + **************************************************************/ + +static void TLan_Timer( unsigned long data ) { struct net_device *dev = (struct net_device *) data; - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u32 elapsed; unsigned long flags = 0; priv->timer.function = NULL; - switch (priv->timer_type) { + switch ( priv->timerType ) { #ifdef MONITOR - case TLAN_TIMER_LINK_BEAT: - tlan_phy_monitor(dev); - break; + case TLAN_TIMER_LINK_BEAT: + TLan_PhyMonitor( dev ); + break; #endif - case TLAN_TIMER_PHY_PDOWN: - tlan_phy_power_down(dev); - break; - case TLAN_TIMER_PHY_PUP: - tlan_phy_power_up(dev); - break; - case TLAN_TIMER_PHY_RESET: - tlan_phy_reset(dev); - break; - case TLAN_TIMER_PHY_START_LINK: - tlan_phy_start_link(dev); - break; - case TLAN_TIMER_PHY_FINISH_AN: - tlan_phy_finish_auto_neg(dev); - break; - case TLAN_TIMER_FINISH_RESET: - tlan_finish_reset(dev); - break; - case TLAN_TIMER_ACTIVITY: - spin_lock_irqsave(&priv->lock, flags); - if (priv->timer.function == NULL) { - elapsed = jiffies - priv->timer_set_at; - if (elapsed >= TLAN_TIMER_ACT_DELAY) { - tlan_dio_write8(dev->base_addr, - TLAN_LED_REG, TLAN_LED_LINK); - } else { - priv->timer.function = tlan_timer; - priv->timer.expires = priv->timer_set_at - + TLAN_TIMER_ACT_DELAY; - spin_unlock_irqrestore(&priv->lock, flags); - add_timer(&priv->timer); - break; + case TLAN_TIMER_PHY_PDOWN: + TLan_PhyPowerDown( dev ); + break; + case TLAN_TIMER_PHY_PUP: + TLan_PhyPowerUp( dev ); + break; + case TLAN_TIMER_PHY_RESET: + TLan_PhyReset( dev ); + break; + case TLAN_TIMER_PHY_START_LINK: + TLan_PhyStartLink( dev ); + break; + case TLAN_TIMER_PHY_FINISH_AN: + TLan_PhyFinishAutoNeg( dev ); + break; + case TLAN_TIMER_FINISH_RESET: + TLan_FinishReset( dev ); + break; + case TLAN_TIMER_ACTIVITY: + spin_lock_irqsave(&priv->lock, flags); + if ( priv->timer.function == NULL ) { + elapsed = jiffies - priv->timerSetAt; + if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { + TLan_DioWrite8( dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK ); + } else { + priv->timer.function = TLan_Timer; + priv->timer.expires = priv->timerSetAt + + TLAN_TIMER_ACT_DELAY; + spin_unlock_irqrestore(&priv->lock, flags); + add_timer( &priv->timer ); + break; + } } - } - spin_unlock_irqrestore(&priv->lock, flags); - break; - default: - break; + spin_unlock_irqrestore(&priv->lock, flags); + break; + default: + break; } -} +} /* TLan_Timer */ @@ -2007,39 +1919,39 @@ static void tlan_timer(unsigned long data) /***************************************************************************** ****************************************************************************** -ThunderLAN driver adapter related routines + ThunderLAN Driver Adapter Related Routines 
****************************************************************************** *****************************************************************************/ -/*************************************************************** - * tlan_reset_lists - * - * Returns: - * Nothing - * Parms: - * dev The device structure with the list - * stuctures to be reset. - * - * This routine sets the variables associated with managing - * the TLAN lists to their initial values. - * - **************************************************************/ - -static void tlan_reset_lists(struct net_device *dev) + /*************************************************************** + * TLan_ResetLists + * + * Returns: + * Nothing + * Parms: + * dev The device structure with the list + * stuctures to be reset. + * + * This routine sets the variables associated with managing + * the TLAN lists to their initial values. + * + **************************************************************/ + +static void TLan_ResetLists( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); int i; - struct tlan_list *list; + TLanList *list; dma_addr_t list_phys; struct sk_buff *skb; - priv->tx_head = 0; - priv->tx_tail = 0; - for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { - list = priv->tx_list + i; - list->c_stat = TLAN_CSTAT_UNUSED; + priv->txHead = 0; + priv->txTail = 0; + for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { + list = priv->txList + i; + list->cStat = TLAN_CSTAT_UNUSED; list->buffer[0].address = 0; list->buffer[2].count = 0; list->buffer[2].address = 0; @@ -2047,169 +1959,169 @@ static void tlan_reset_lists(struct net_device *dev) list->buffer[9].address = 0; } - priv->rx_head = 0; - priv->rx_tail = TLAN_NUM_RX_LISTS - 1; - for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { - list = priv->rx_list + i; - list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i; - list->c_stat = TLAN_CSTAT_READY; - list->frame_size = TLAN_MAX_FRAME_SIZE; + priv->rxHead = 0; + priv->rxTail = TLAN_NUM_RX_LISTS - 1; + for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { + list = priv->rxList + i; + list_phys = priv->rxListDMA + sizeof(TLanList) * i; + list->cStat = TLAN_CSTAT_READY; + list->frameSize = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if (!skb) { - pr_err("TLAN: out of memory for received data.\n"); + if ( !skb ) { + pr_err("TLAN: out of memory for received data.\n" ); break; } - list->buffer[0].address = pci_map_single(priv->pci_dev, + list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); - tlan_store_skb(list, skb); + TLan_StoreSKB(list, skb); list->buffer[1].count = 0; list->buffer[1].address = 0; - list->forward = list_phys + sizeof(struct tlan_list); + list->forward = list_phys + sizeof(TLanList); } /* in case ran out of memory early, clear bits */ while (i < TLAN_NUM_RX_LISTS) { - tlan_store_skb(priv->rx_list + i, NULL); + TLan_StoreSKB(priv->rxList + i, NULL); ++i; } list->forward = 0; -} +} /* TLan_ResetLists */ -static void tlan_free_lists(struct net_device *dev) +static void TLan_FreeLists( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); int i; - struct tlan_list *list; + TLanList *list; struct sk_buff *skb; - for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { - list = priv->tx_list + i; - skb = tlan_get_skb(list); - if (skb) { + for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { + list 
= priv->txList + i; + skb = TLan_GetSKB(list); + if ( skb ) { pci_unmap_single( - priv->pci_dev, + priv->pciDev, list->buffer[0].address, max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE), PCI_DMA_TODEVICE); - dev_kfree_skb_any(skb); + dev_kfree_skb_any( skb ); list->buffer[8].address = 0; list->buffer[9].address = 0; } } - for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { - list = priv->rx_list + i; - skb = tlan_get_skb(list); - if (skb) { - pci_unmap_single(priv->pci_dev, + for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { + list = priv->rxList + i; + skb = TLan_GetSKB(list); + if ( skb ) { + pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(skb); + dev_kfree_skb_any( skb ); list->buffer[8].address = 0; list->buffer[9].address = 0; } } -} +} /* TLan_FreeLists */ -/*************************************************************** - * tlan_print_dio - * - * Returns: - * Nothing - * Parms: - * io_base Base IO port of the device of - * which to print DIO registers. - * - * This function prints out all the internal (DIO) - * registers of a TLAN chip. - * - **************************************************************/ + /*************************************************************** + * TLan_PrintDio + * + * Returns: + * Nothing + * Parms: + * io_base Base IO port of the device of + * which to print DIO registers. + * + * This function prints out all the internal (DIO) + * registers of a TLAN chip. + * + **************************************************************/ -static void tlan_print_dio(u16 io_base) +static void TLan_PrintDio( u16 io_base ) { u32 data0, data1; int i; - pr_info("TLAN: Contents of internal registers for io base 0x%04hx.\n", - io_base); - pr_info("TLAN: Off. +0 +4\n"); - for (i = 0; i < 0x4C; i += 8) { - data0 = tlan_dio_read32(io_base, i); - data1 = tlan_dio_read32(io_base, i + 0x4); - pr_info("TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1); + printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", + io_base ); + printk( "TLAN: Off. +0 +4\n" ); + for ( i = 0; i < 0x4C; i+= 8 ) { + data0 = TLan_DioRead32( io_base, i ); + data1 = TLan_DioRead32( io_base, i + 0x4 ); + printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 ); } -} +} /* TLan_PrintDio */ -/*************************************************************** - * TLan_PrintList - * - * Returns: - * Nothing - * Parms: - * list A pointer to the struct tlan_list structure to - * be printed. - * type A string to designate type of list, - * "Rx" or "Tx". - * num The index of the list. - * - * This function prints out the contents of the list - * pointed to by the list parameter. - * - **************************************************************/ + /*************************************************************** + * TLan_PrintList + * + * Returns: + * Nothing + * Parms: + * list A pointer to the TLanList structure to + * be printed. + * type A string to designate type of list, + * "Rx" or "Tx". + * num The index of the list. + * + * This function prints out the contents of the list + * pointed to by the list parameter. 
+ * + **************************************************************/ -static void tlan_print_list(struct tlan_list *list, char *type, int num) +static void TLan_PrintList( TLanList *list, char *type, int num) { int i; - pr_info("TLAN: %s List %d at %p\n", type, num, list); - pr_info("TLAN: Forward = 0x%08x\n", list->forward); - pr_info("TLAN: CSTAT = 0x%04hx\n", list->c_stat); - pr_info("TLAN: Frame Size = 0x%04hx\n", list->frame_size); - /* for (i = 0; i < 10; i++) { */ - for (i = 0; i < 2; i++) { - pr_info("TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", - i, list->buffer[i].count, list->buffer[i].address); + printk( "TLAN: %s List %d at %p\n", type, num, list ); + printk( "TLAN: Forward = 0x%08x\n", list->forward ); + printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat ); + printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); + /* for ( i = 0; i < 10; i++ ) { */ + for ( i = 0; i < 2; i++ ) { + printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", + i, list->buffer[i].count, list->buffer[i].address ); } -} +} /* TLan_PrintList */ -/*************************************************************** - * tlan_read_and_clear_stats - * - * Returns: - * Nothing - * Parms: - * dev Pointer to device structure of adapter - * to which to read stats. - * record Flag indicating whether to add - * - * This functions reads all the internal status registers - * of the TLAN chip, which clears them as a side effect. - * It then either adds the values to the device's status - * struct, or discards them, depending on whether record - * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). - * - **************************************************************/ + /*************************************************************** + * TLan_ReadAndClearStats + * + * Returns: + * Nothing + * Parms: + * dev Pointer to device structure of adapter + * to which to read stats. + * record Flag indicating whether to add + * + * This functions reads all the internal status registers + * of the TLAN chip, which clears them as a side effect. + * It then either adds the values to the device's status + * struct, or discards them, depending on whether record + * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). 
+ * + **************************************************************/ -static void tlan_read_and_clear_stats(struct net_device *dev, int record) +static void TLan_ReadAndClearStats( struct net_device *dev, int record ) { u32 tx_good, tx_under; u32 rx_good, rx_over; @@ -2217,42 +2129,41 @@ static void tlan_read_and_clear_stats(struct net_device *dev, int record) u32 multi_col, single_col; u32 excess_col, late_col, loss; - outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR); - tx_good = inb(dev->base_addr + TLAN_DIO_DATA); - tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; - tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; - tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3); - - outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR); - rx_good = inb(dev->base_addr + TLAN_DIO_DATA); - rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; - rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; - rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3); - - outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR); - def_tx = inb(dev->base_addr + TLAN_DIO_DATA); - def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; - crc = inb(dev->base_addr + TLAN_DIO_DATA + 2); - code = inb(dev->base_addr + TLAN_DIO_DATA + 3); - - outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR); - multi_col = inb(dev->base_addr + TLAN_DIO_DATA); - multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; - single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2); - single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8; - - outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR); - excess_col = inb(dev->base_addr + TLAN_DIO_DATA); - late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1); - loss = inb(dev->base_addr + TLAN_DIO_DATA + 2); - - if (record) { + outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR ); + tx_good = inb( dev->base_addr + TLAN_DIO_DATA ); + tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; + tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; + tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); + + outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR ); + rx_good = inb( dev->base_addr + TLAN_DIO_DATA ); + rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; + rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; + rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); + + outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR ); + def_tx = inb( dev->base_addr + TLAN_DIO_DATA ); + def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; + crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); + code = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); + + outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); + multi_col = inb( dev->base_addr + TLAN_DIO_DATA ); + multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; + single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); + single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8; + + outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); + excess_col = inb( dev->base_addr + TLAN_DIO_DATA ); + late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 ); + loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); + + if ( record ) { dev->stats.rx_packets += rx_good; dev->stats.rx_errors += rx_over + crc + code; dev->stats.tx_packets += tx_good; dev->stats.tx_errors += tx_under + loss; - dev->stats.collisions += multi_col - + single_col + excess_col + late_col; + dev->stats.collisions += multi_col + single_col + excess_col + late_col; dev->stats.rx_over_errors += rx_over; dev->stats.rx_crc_errors += crc; 
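The statistics hunk above restores the access pattern the driver uses for its hardware counters: a register block is selected by writing its offset to TLAN_DIO_ADR, and the 32-bit window at TLAN_DIO_DATA is then read out byte by byte, which clears the counters as a side effect. A minimal sketch of that pattern follows; it assumes the driver's usual headers are in scope, and the helper name tlan_read_stat_window() with its io_base/stat_reg/count24/extra parameters is purely illustrative, not something this patch adds.

/* Sketch only, not part of the patch: read one TLAN statistics word
 * through the DIO window.  Assumes <linux/io.h> and the TLAN_DIO_*
 * definitions from tlan.h are available.
 */
static void tlan_read_stat_window(unsigned long io_base, u16 stat_reg,
				  u32 *count24, u8 *extra)
{
	outw(stat_reg, io_base + TLAN_DIO_ADR);			/* select the register block   */
	*count24  = inb(io_base + TLAN_DIO_DATA);		/* bits  7:0 of the counter    */
	*count24 += inb(io_base + TLAN_DIO_DATA + 1) << 8;	/* bits 15:8                   */
	*count24 += inb(io_base + TLAN_DIO_DATA + 2) << 16;	/* bits 23:16                  */
	*extra    = inb(io_base + TLAN_DIO_DATA + 3);		/* companion byte (e.g. underruns) */
}

Used this way, the helper mirrors the TLAN_GOOD_TX_FRMS and TLAN_GOOD_RX_FRMS reads in the hunk above; the other statistics registers split the same 32-bit window differently (two 16-bit fields for TLAN_MULTICOL_FRMS, for instance), which is why the hunk keeps their explicit inb() sequences.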
@@ -2262,39 +2173,39 @@ static void tlan_read_and_clear_stats(struct net_device *dev, int record) dev->stats.tx_carrier_errors += loss; } -} +} /* TLan_ReadAndClearStats */ -/*************************************************************** - * TLan_Reset - * - * Returns: - * 0 - * Parms: - * dev Pointer to device structure of adapter - * to be reset. - * - * This function resets the adapter and it's physical - * device. See Chap. 3, pp. 9-10 of the "ThunderLAN - * Programmer's Guide" for details. The routine tries to - * implement what is detailed there, though adjustments - * have been made. - * - **************************************************************/ + /*************************************************************** + * TLan_Reset + * + * Returns: + * 0 + * Parms: + * dev Pointer to device structure of adapter + * to be reset. + * + * This function resets the adapter and it's physical + * device. See Chap. 3, pp. 9-10 of the "ThunderLAN + * Programmer's Guide" for details. The routine tries to + * implement what is detailed there, though adjustments + * have been made. + * + **************************************************************/ static void -tlan_reset_adapter(struct net_device *dev) +TLan_ResetAdapter( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); int i; u32 addr; u32 data; u8 data8; - priv->tlan_full_duplex = false; - priv->phy_online = 0; + priv->tlanFullDuplex = false; + priv->phyOnline=0; netif_carrier_off(dev); /* 1. Assert reset bit. */ @@ -2305,7 +2216,7 @@ tlan_reset_adapter(struct net_device *dev) udelay(1000); -/* 2. Turn off interrupts. (Probably isn't necessary) */ +/* 2. Turn off interrupts. ( Probably isn't necessary ) */ data = inl(dev->base_addr + TLAN_HOST_CMD); data |= TLAN_HC_INT_OFF; @@ -2313,208 +2224,207 @@ tlan_reset_adapter(struct net_device *dev) /* 3. Clear AREGs and HASHs. */ - for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4) - tlan_dio_write32(dev->base_addr, (u16) i, 0); + for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) { + TLan_DioWrite32( dev->base_addr, (u16) i, 0 ); + } /* 4. Setup NetConfig register. */ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; - tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); + TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data ); /* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */ - outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD); - outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD); + outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD ); + outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD ); /* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */ - outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); + outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR ); addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; - tlan_set_bit(TLAN_NET_SIO_NMRST, addr); + TLan_SetBit( TLAN_NET_SIO_NMRST, addr ); /* 7. Setup the remaining registers. 
*/ - if (priv->tlan_rev >= 0x30) { + if ( priv->tlanRev >= 0x30 ) { data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC; - tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8); + TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 ); } - tlan_phy_detect(dev); + TLan_PhyDetect( dev ); data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN; - if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) { + if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) { data |= TLAN_NET_CFG_BIT; - if (priv->aui == 1) { - tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a); - } else if (priv->duplex == TLAN_DUPLEX_FULL) { - tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00); - priv->tlan_full_duplex = true; + if ( priv->aui == 1 ) { + TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); + } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { + TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); + priv->tlanFullDuplex = true; } else { - tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08); + TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); } } - if (priv->phy_num == 0) + if ( priv->phyNum == 0 ) { data |= TLAN_NET_CFG_PHY_EN; - tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); + } + TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data ); - if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) - tlan_finish_reset(dev); - else - tlan_phy_power_down(dev); + if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { + TLan_FinishReset( dev ); + } else { + TLan_PhyPowerDown( dev ); + } -} +} /* TLan_ResetAdapter */ static void -tlan_finish_reset(struct net_device *dev) +TLan_FinishReset( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u8 data; u32 phy; u8 sio; u16 status; u16 partner; u16 tlphy_ctl; - u16 tlphy_par; + u16 tlphy_par; u16 tlphy_id1, tlphy_id2; - int i; + int i; - phy = priv->phy[priv->phy_num]; + phy = priv->phy[priv->phyNum]; data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP; - if (priv->tlan_full_duplex) + if ( priv->tlanFullDuplex ) { data |= TLAN_NET_CMD_DUPLEX; - tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data); + } + TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data ); data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5; - if (priv->phy_num == 0) + if ( priv->phyNum == 0 ) { data |= TLAN_NET_MASK_MASK7; - tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data); - tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7); - tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1); - tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2); + } + TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data ); + TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 ); + TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); + TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 ); - if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) || - (priv->aui)) { + if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || + ( priv->aui ) ) { status = MII_GS_LINK; - pr_info("TLAN: %s: Link forced.\n", dev->name); + printk( "TLAN: %s: Link forced.\n", dev->name ); } else { - tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); - udelay(1000); - tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); - if ((status & MII_GS_LINK) && - /* We only support link info on Nat.Sem. 
PHY's */ - (tlphy_id1 == NAT_SEM_ID1) && - (tlphy_id2 == NAT_SEM_ID2)) { - tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner); - tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par); - - pr_info("TLAN: %s: Link active with ", dev->name); + TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); + udelay( 1000 ); + TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); + if ( (status & MII_GS_LINK) && + /* We only support link info on Nat.Sem. PHY's */ + (tlphy_id1 == NAT_SEM_ID1) && + (tlphy_id2 == NAT_SEM_ID2) ) { + TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); + TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par ); + + printk( "TLAN: %s: Link active with ", dev->name ); if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { - pr_info("forced 10%sMbps %s-Duplex\n", - tlphy_par & TLAN_PHY_SPEED_100 - ? "" : "0", - tlphy_par & TLAN_PHY_DUPLEX_FULL - ? "Full" : "Half"); + printk( "forced 10%sMbps %s-Duplex\n", + tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", + tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); } else { - pr_info("Autonegotiation enabled, at 10%sMbps %s-Duplex\n", - tlphy_par & TLAN_PHY_SPEED_100 - ? "" : "0", - tlphy_par & TLAN_PHY_DUPLEX_FULL - ? "Full" : "half"); - pr_info("TLAN: Partner capability: "); - for (i = 5; i <= 10; i++) - if (partner & (1<base_addr, TLAN_LED_REG, - TLAN_LED_LINK); + TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); #ifdef MONITOR /* We have link beat..for now anyway */ - priv->link = 1; - /*Enabling link beat monitoring */ - tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT); + priv->link = 1; + /*Enabling link beat monitoring */ + TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT ); #endif } else if (status & MII_GS_LINK) { - pr_info("TLAN: %s: Link active\n", dev->name); - tlan_dio_write8(dev->base_addr, TLAN_LED_REG, - TLAN_LED_LINK); + printk( "TLAN: %s: Link active\n", dev->name ); + TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); } } - if (priv->phy_num == 0) { - tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); - tlphy_ctl |= TLAN_TC_INTEN; - tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); - sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO); - sio |= TLAN_NET_SIO_MINTEN; - tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio); - } - - if (status & MII_GS_LINK) { - tlan_set_mac(dev, 0, dev->dev_addr); - priv->phy_online = 1; - outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1); - if (debug >= 1 && debug != TLAN_DEBUG_PROBE) - outb((TLAN_HC_REQ_INT >> 8), - dev->base_addr + TLAN_HOST_CMD + 1); - outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM); - outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD); + if ( priv->phyNum == 0 ) { + TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); + tlphy_ctl |= TLAN_TC_INTEN; + TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl ); + sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO ); + sio |= TLAN_NET_SIO_MINTEN; + TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio ); + } + + if ( status & MII_GS_LINK ) { + TLan_SetMac( dev, 0, dev->dev_addr ); + priv->phyOnline = 1; + outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); + if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) { + outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); + } + outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM ); + outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); netif_carrier_on(dev); } else { - pr_info("TLAN: %s: Link inactive, will retry in 10 secs...\n", - dev->name); - tlan_set_timer(dev, (10*HZ), 
TLAN_TIMER_FINISH_RESET); + printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", + dev->name ); + TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET ); return; } - tlan_set_multicast_list(dev); + TLan_SetMulticastList(dev); -} +} /* TLan_FinishReset */ -/*************************************************************** - * tlan_set_mac - * - * Returns: - * Nothing - * Parms: - * dev Pointer to device structure of adapter - * on which to change the AREG. - * areg The AREG to set the address in (0 - 3). - * mac A pointer to an array of chars. Each - * element stores one byte of the address. - * IE, it isn't in ascii. - * - * This function transfers a MAC address to one of the - * TLAN AREGs (address registers). The TLAN chip locks - * the register on writing to offset 0 and unlocks the - * register after writing to offset 5. If NULL is passed - * in mac, then the AREG is filled with 0's. - * - **************************************************************/ + /*************************************************************** + * TLan_SetMac + * + * Returns: + * Nothing + * Parms: + * dev Pointer to device structure of adapter + * on which to change the AREG. + * areg The AREG to set the address in (0 - 3). + * mac A pointer to an array of chars. Each + * element stores one byte of the address. + * IE, it isn't in ascii. + * + * This function transfers a MAC address to one of the + * TLAN AREGs (address registers). The TLAN chip locks + * the register on writing to offset 0 and unlocks the + * register after writing to offset 5. If NULL is passed + * in mac, then the AREG is filled with 0's. + * + **************************************************************/ -static void tlan_set_mac(struct net_device *dev, int areg, char *mac) +static void TLan_SetMac( struct net_device *dev, int areg, char *mac ) { int i; areg *= 6; - if (mac != NULL) { - for (i = 0; i < 6; i++) - tlan_dio_write8(dev->base_addr, - TLAN_AREG_0 + areg + i, mac[i]); + if ( mac != NULL ) { + for ( i = 0; i < 6; i++ ) + TLan_DioWrite8( dev->base_addr, + TLAN_AREG_0 + areg + i, mac[i] ); } else { - for (i = 0; i < 6; i++) - tlan_dio_write8(dev->base_addr, - TLAN_AREG_0 + areg + i, 0); + for ( i = 0; i < 6; i++ ) + TLan_DioWrite8( dev->base_addr, + TLAN_AREG_0 + areg + i, 0 ); } -} +} /* TLan_SetMac */ @@ -2522,202 +2432,205 @@ static void tlan_set_mac(struct net_device *dev, int areg, char *mac) /***************************************************************************** ****************************************************************************** -ThunderLAN driver PHY layer routines + ThunderLAN Driver PHY Layer Routines ****************************************************************************** *****************************************************************************/ -/********************************************************************* - * tlan_phy_print - * - * Returns: - * Nothing - * Parms: - * dev A pointer to the device structure of the - * TLAN device having the PHYs to be detailed. - * - * This function prints the registers a PHY (aka transceiver). - * - ********************************************************************/ + /********************************************************************* + * TLan_PhyPrint + * + * Returns: + * Nothing + * Parms: + * dev A pointer to the device structure of the + * TLAN device having the PHYs to be detailed. + * + * This function prints the registers a PHY (aka transceiver). 
+ * + ********************************************************************/ -static void tlan_phy_print(struct net_device *dev) +static void TLan_PhyPrint( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 i, data0, data1, data2, data3, phy; - phy = priv->phy[priv->phy_num]; - - if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { - pr_info("TLAN: Device %s, Unmanaged PHY.\n", dev->name); - } else if (phy <= TLAN_PHY_MAX_ADDR) { - pr_info("TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy); - pr_info("TLAN: Off. +0 +1 +2 +3\n"); - for (i = 0; i < 0x20; i += 4) { - pr_info("TLAN: 0x%02x", i); - tlan_mii_read_reg(dev, phy, i, &data0); - printk(" 0x%04hx", data0); - tlan_mii_read_reg(dev, phy, i + 1, &data1); - printk(" 0x%04hx", data1); - tlan_mii_read_reg(dev, phy, i + 2, &data2); - printk(" 0x%04hx", data2); - tlan_mii_read_reg(dev, phy, i + 3, &data3); - printk(" 0x%04hx\n", data3); + phy = priv->phy[priv->phyNum]; + + if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { + printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); + } else if ( phy <= TLAN_PHY_MAX_ADDR ) { + printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); + printk( "TLAN: Off. +0 +1 +2 +3\n" ); + for ( i = 0; i < 0x20; i+= 4 ) { + printk( "TLAN: 0x%02x", i ); + TLan_MiiReadReg( dev, phy, i, &data0 ); + printk( " 0x%04hx", data0 ); + TLan_MiiReadReg( dev, phy, i + 1, &data1 ); + printk( " 0x%04hx", data1 ); + TLan_MiiReadReg( dev, phy, i + 2, &data2 ); + printk( " 0x%04hx", data2 ); + TLan_MiiReadReg( dev, phy, i + 3, &data3 ); + printk( " 0x%04hx\n", data3 ); } } else { - pr_info("TLAN: Device %s, Invalid PHY.\n", dev->name); + printk( "TLAN: Device %s, Invalid PHY.\n", dev->name ); } -} +} /* TLan_PhyPrint */ -/********************************************************************* - * tlan_phy_detect - * - * Returns: - * Nothing - * Parms: - * dev A pointer to the device structure of the adapter - * for which the PHY needs determined. - * - * So far I've found that adapters which have external PHYs - * may also use the internal PHY for part of the functionality. - * (eg, AUI/Thinnet). This function finds out if this TLAN - * chip has an internal PHY, and then finds the first external - * PHY (starting from address 0) if it exists). - * - ********************************************************************/ + /********************************************************************* + * TLan_PhyDetect + * + * Returns: + * Nothing + * Parms: + * dev A pointer to the device structure of the adapter + * for which the PHY needs determined. + * + * So far I've found that adapters which have external PHYs + * may also use the internal PHY for part of the functionality. + * (eg, AUI/Thinnet). This function finds out if this TLAN + * chip has an internal PHY, and then finds the first external + * PHY (starting from address 0) if it exists). 
+ * + ********************************************************************/ -static void tlan_phy_detect(struct net_device *dev) +static void TLan_PhyDetect( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 control; u16 hi; u16 lo; u32 phy; - if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { - priv->phy_num = 0xffff; + if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { + priv->phyNum = 0xFFFF; return; } - tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi); + TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi ); - if (hi != 0xffff) + if ( hi != 0xFFFF ) { priv->phy[0] = TLAN_PHY_MAX_ADDR; - else + } else { priv->phy[0] = TLAN_PHY_NONE; + } priv->phy[1] = TLAN_PHY_NONE; - for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) { - tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control); - tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi); - tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo); - if ((control != 0xffff) || - (hi != 0xffff) || (lo != 0xffff)) { - TLAN_DBG(TLAN_DEBUG_GNRL, - "PHY found at %02x %04x %04x %04x\n", - phy, control, hi, lo); - if ((priv->phy[1] == TLAN_PHY_NONE) && - (phy != TLAN_PHY_MAX_ADDR)) { + for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) { + TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); + TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); + TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo ); + if ( ( control != 0xFFFF ) || + ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { + TLAN_DBG( TLAN_DEBUG_GNRL, + "PHY found at %02x %04x %04x %04x\n", + phy, control, hi, lo ); + if ( ( priv->phy[1] == TLAN_PHY_NONE ) && + ( phy != TLAN_PHY_MAX_ADDR ) ) { priv->phy[1] = phy; } } } - if (priv->phy[1] != TLAN_PHY_NONE) - priv->phy_num = 1; - else if (priv->phy[0] != TLAN_PHY_NONE) - priv->phy_num = 0; - else - pr_info("TLAN: Cannot initialize device, no PHY was found!\n"); + if ( priv->phy[1] != TLAN_PHY_NONE ) { + priv->phyNum = 1; + } else if ( priv->phy[0] != TLAN_PHY_NONE ) { + priv->phyNum = 0; + } else { + printk( "TLAN: Cannot initialize device, no PHY was found!\n" ); + } -} +} /* TLan_PhyDetect */ -static void tlan_phy_power_down(struct net_device *dev) +static void TLan_PhyPowerDown( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 value; - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name); + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name ); value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; - tlan_mii_sync(dev->base_addr); - tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); - if ((priv->phy_num == 0) && - (priv->phy[1] != TLAN_PHY_NONE) && - (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) { - tlan_mii_sync(dev->base_addr); - tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value); + TLan_MiiSync( dev->base_addr ); + TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); + if ( ( priv->phyNum == 0 ) && + ( priv->phy[1] != TLAN_PHY_NONE ) && + ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { + TLan_MiiSync( dev->base_addr ); + TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); } /* Wait for 50 ms and powerup * This is abitrary. It is intended to make sure the * transceiver settles. 
*/ - tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP); + TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP ); -} +} /* TLan_PhyPowerDown */ -static void tlan_phy_power_up(struct net_device *dev) +static void TLan_PhyPowerUp( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 value; - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name); - tlan_mii_sync(dev->base_addr); + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name ); + TLan_MiiSync( dev->base_addr ); value = MII_GC_LOOPBK; - tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); - tlan_mii_sync(dev->base_addr); + TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); + TLan_MiiSync(dev->base_addr); /* Wait for 500 ms and reset the * transceiver. The TLAN docs say both 50 ms and * 500 ms, so do the longer, just in case. */ - tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET); + TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET ); -} +} /* TLan_PhyPowerUp */ -static void tlan_phy_reset(struct net_device *dev) +static void TLan_PhyReset( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 phy; u16 value; - phy = priv->phy[priv->phy_num]; + phy = priv->phy[priv->phyNum]; - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name); - tlan_mii_sync(dev->base_addr); + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name ); + TLan_MiiSync( dev->base_addr ); value = MII_GC_LOOPBK | MII_GC_RESET; - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); - tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value); - while (value & MII_GC_RESET) - tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value); + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value ); + TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); + while ( value & MII_GC_RESET ) { + TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); + } /* Wait for 500 ms and initialize. * I don't remember why I wait this long. * I've changed this to 50ms, as it seems long enough. */ - tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK); + TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK ); -} +} /* TLan_PhyReset */ -static void tlan_phy_start_link(struct net_device *dev) +static void TLan_PhyStartLink( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 ability; u16 control; u16 data; @@ -2725,88 +2638,86 @@ static void tlan_phy_start_link(struct net_device *dev) u16 status; u16 tctl; - phy = priv->phy[priv->phy_num]; - TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name); - tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); - tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability); + phy = priv->phy[priv->phyNum]; + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name ); + TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); + TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability ); - if ((status & MII_GS_AUTONEG) && - (!priv->aui)) { + if ( ( status & MII_GS_AUTONEG ) && + ( ! 
priv->aui ) ) { ability = status >> 11; - if (priv->speed == TLAN_SPEED_10 && - priv->duplex == TLAN_DUPLEX_HALF) { - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000); - } else if (priv->speed == TLAN_SPEED_10 && - priv->duplex == TLAN_DUPLEX_FULL) { - priv->tlan_full_duplex = true; - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100); - } else if (priv->speed == TLAN_SPEED_100 && - priv->duplex == TLAN_DUPLEX_HALF) { - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000); - } else if (priv->speed == TLAN_SPEED_100 && - priv->duplex == TLAN_DUPLEX_FULL) { - priv->tlan_full_duplex = true; - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100); + if ( priv->speed == TLAN_SPEED_10 && + priv->duplex == TLAN_DUPLEX_HALF) { + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); + } else if ( priv->speed == TLAN_SPEED_10 && + priv->duplex == TLAN_DUPLEX_FULL) { + priv->tlanFullDuplex = true; + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); + } else if ( priv->speed == TLAN_SPEED_100 && + priv->duplex == TLAN_DUPLEX_HALF) { + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); + } else if ( priv->speed == TLAN_SPEED_100 && + priv->duplex == TLAN_DUPLEX_FULL) { + priv->tlanFullDuplex = true; + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); } else { /* Set Auto-Neg advertisement */ - tlan_mii_write_reg(dev, phy, MII_AN_ADV, - (ability << 5) | 1); + TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1); /* Enablee Auto-Neg */ - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000); + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 ); /* Restart Auto-Neg */ - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200); + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 ); /* Wait for 4 sec for autonegotiation - * to complete. The max spec time is less than this - * but the card need additional time to start AN. - * .5 sec should be plenty extra. - */ - pr_info("TLAN: %s: Starting autonegotiation.\n", - dev->name); - tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN); + * to complete. The max spec time is less than this + * but the card need additional time to start AN. + * .5 sec should be plenty extra. 
+ */ + printk( "TLAN: %s: Starting autonegotiation.\n", dev->name ); + TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN ); return; } } - if ((priv->aui) && (priv->phy_num != 0)) { - priv->phy_num = 0; - data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN - | TLAN_NET_CFG_PHY_EN; - tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); - tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN); + if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) { + priv->phyNum = 0; + data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; + TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); + TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN ); return; - } else if (priv->phy_num == 0) { + } else if ( priv->phyNum == 0 ) { control = 0; - tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl); - if (priv->aui) { - tctl |= TLAN_TC_AUISEL; + TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl ); + if ( priv->aui ) { + tctl |= TLAN_TC_AUISEL; } else { - tctl &= ~TLAN_TC_AUISEL; - if (priv->duplex == TLAN_DUPLEX_FULL) { + tctl &= ~TLAN_TC_AUISEL; + if ( priv->duplex == TLAN_DUPLEX_FULL ) { control |= MII_GC_DUPLEX; - priv->tlan_full_duplex = true; + priv->tlanFullDuplex = true; } - if (priv->speed == TLAN_SPEED_100) + if ( priv->speed == TLAN_SPEED_100 ) { control |= MII_GC_SPEEDSEL; + } } - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control); - tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl); + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control ); + TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl ); } /* Wait for 2 sec to give the transceiver time * to establish link. */ - tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET); + TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET ); -} +} /* TLan_PhyStartLink */ -static void tlan_phy_finish_auto_neg(struct net_device *dev) +static void TLan_PhyFinishAutoNeg( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 an_adv; u16 an_lpa; u16 data; @@ -2814,118 +2725,115 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev) u16 phy; u16 status; - phy = priv->phy[priv->phy_num]; + phy = priv->phy[priv->phyNum]; - tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); - udelay(1000); - tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); + udelay( 1000 ); + TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); - if (!(status & MII_GS_AUTOCMPLT)) { + if ( ! ( status & MII_GS_AUTOCMPLT ) ) { /* Wait for 8 sec to give the process * more time. Perhaps we should fail after a while. 
*/ - if (!priv->neg_be_verbose++) { - pr_info("TLAN: Giving autonegotiation more time.\n"); - pr_info("TLAN: Please check that your adapter has\n"); - pr_info("TLAN: been properly connected to a HUB or Switch.\n"); - pr_info("TLAN: Trying to establish link in the background...\n"); - } - tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN); + if (!priv->neg_be_verbose++) { + pr_info("TLAN: Giving autonegotiation more time.\n"); + pr_info("TLAN: Please check that your adapter has\n"); + pr_info("TLAN: been properly connected to a HUB or Switch.\n"); + pr_info("TLAN: Trying to establish link in the background...\n"); + } + TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); return; } - pr_info("TLAN: %s: Autonegotiation complete.\n", dev->name); - tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv); - tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa); + printk( "TLAN: %s: Autonegotiation complete.\n", dev->name ); + TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv ); + TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); mode = an_adv & an_lpa & 0x03E0; - if (mode & 0x0100) - priv->tlan_full_duplex = true; - else if (!(mode & 0x0080) && (mode & 0x0040)) - priv->tlan_full_duplex = true; - - if ((!(mode & 0x0180)) && - (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) && - (priv->phy_num != 0)) { - priv->phy_num = 0; - data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN - | TLAN_NET_CFG_PHY_EN; - tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); - tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN); + if ( mode & 0x0100 ) { + priv->tlanFullDuplex = true; + } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { + priv->tlanFullDuplex = true; + } + + if ( ( ! ( mode & 0x0180 ) ) && + ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && + ( priv->phyNum != 0 ) ) { + priv->phyNum = 0; + data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; + TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); + TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN ); return; } - if (priv->phy_num == 0) { - if ((priv->duplex == TLAN_DUPLEX_FULL) || - (an_adv & an_lpa & 0x0040)) { - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, - MII_GC_AUTOENB | MII_GC_DUPLEX); - pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n"); + if ( priv->phyNum == 0 ) { + if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || + ( an_adv & an_lpa & 0x0040 ) ) { + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, + MII_GC_AUTOENB | MII_GC_DUPLEX ); + pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" ); } else { - tlan_mii_write_reg(dev, phy, MII_GEN_CTL, - MII_GC_AUTOENB); - pr_info("TLAN: Starting internal PHY with HALF-DUPLEX\n"); + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); + pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); } } /* Wait for 100 ms. No reason in partiticular. */ - tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET); + TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET ); -} +} /* TLan_PhyFinishAutoNeg */ #ifdef MONITOR -/********************************************************************* - * - * tlan_phy_monitor - * - * Returns: - * None - * - * Params: - * dev The device structure of this device. - * - * - * This function monitors PHY condition by reading the status - * register via the MII bus. This can be used to give info - * about link changes (up/down), and possible switch to alternate - * media. 
- * - *******************************************************************/ - -void tlan_phy_monitor(struct net_device *dev) + /********************************************************************* + * + * TLan_phyMonitor + * + * Returns: + * None + * + * Params: + * dev The device structure of this device. + * + * + * This function monitors PHY condition by reading the status + * register via the MII bus. This can be used to give info + * about link changes (up/down), and possible switch to alternate + * media. + * + * ******************************************************************/ + +void TLan_PhyMonitor( struct net_device *dev ) { - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); u16 phy; u16 phy_status; - phy = priv->phy[priv->phy_num]; + phy = priv->phy[priv->phyNum]; - /* Get PHY status register */ - tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status); + /* Get PHY status register */ + TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status ); - /* Check if link has been lost */ - if (!(phy_status & MII_GS_LINK)) { - if (priv->link) { - priv->link = 0; - printk(KERN_DEBUG "TLAN: %s has lost link\n", - dev->name); - netif_carrier_off(dev); - tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT); - return; + /* Check if link has been lost */ + if (!(phy_status & MII_GS_LINK)) { + if (priv->link) { + priv->link = 0; + printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name); + netif_carrier_off(dev); + TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); + return; } } - /* Link restablished? */ - if ((phy_status & MII_GS_LINK) && !priv->link) { - priv->link = 1; - printk(KERN_DEBUG "TLAN: %s has reestablished link\n", - dev->name); + /* Link restablished? */ + if ((phy_status & MII_GS_LINK) && !priv->link) { + priv->link = 1; + printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name); netif_carrier_on(dev); - } + } /* Setup a new monitor */ - tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT); + TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); } #endif /* MONITOR */ @@ -2934,48 +2842,47 @@ void tlan_phy_monitor(struct net_device *dev) /***************************************************************************** ****************************************************************************** -ThunderLAN driver MII routines + ThunderLAN Driver MII Routines -these routines are based on the information in chap. 2 of the -"ThunderLAN Programmer's Guide", pp. 15-24. + These routines are based on the information in Chap. 2 of the + "ThunderLAN Programmer's Guide", pp. 15-24. ****************************************************************************** *****************************************************************************/ -/*************************************************************** - * tlan_mii_read_reg - * - * Returns: - * false if ack received ok - * true if no ack received or other error - * - * Parms: - * dev The device structure containing - * The io address and interrupt count - * for this device. - * phy The address of the PHY to be queried. - * reg The register whose contents are to be - * retrieved. - * val A pointer to a variable to store the - * retrieved value. - * - * This function uses the TLAN's MII bus to retrieve the contents - * of a given register on a PHY. It sends the appropriate info - * and then reads the 16-bit register value from the MII bus via - * the TLAN SIO register. 
- * - **************************************************************/ - -static bool -tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) + /*************************************************************** + * TLan_MiiReadReg + * + * Returns: + * false if ack received ok + * true if no ack received or other error + * + * Parms: + * dev The device structure containing + * The io address and interrupt count + * for this device. + * phy The address of the PHY to be queried. + * reg The register whose contents are to be + * retrieved. + * val A pointer to a variable to store the + * retrieved value. + * + * This function uses the TLAN's MII bus to retrieve the contents + * of a given register on a PHY. It sends the appropriate info + * and then reads the 16-bit register value from the MII bus via + * the TLAN SIO register. + * + **************************************************************/ + +static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) { u8 nack; u16 sio, tmp; - u32 i; + u32 i; bool err; int minten; - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); unsigned long flags = 0; err = false; @@ -2985,48 +2892,48 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) if (!in_irq()) spin_lock_irqsave(&priv->lock, flags); - tlan_mii_sync(dev->base_addr); + TLan_MiiSync(dev->base_addr); - minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); - if (minten) - tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); + minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); + if ( minten ) + TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio); - tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ - tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */ - tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ - tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ + TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ + TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */ + TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ + TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ - tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */ + TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */ - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */ - tlan_set_bit(TLAN_NET_SIO_MCLK, sio); - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */ + TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */ + TLan_SetBit(TLAN_NET_SIO_MCLK, sio); + TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */ - nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */ - tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */ - if (nack) { /* no ACK, so fake it */ + nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */ + TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */ + if (nack) { /* No ACK, so fake it */ for (i = 0; i < 16; i++) { - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); - tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); + TLan_SetBit(TLAN_NET_SIO_MCLK, sio); } tmp = 0xffff; err = true; } else { /* ACK, so read data */ for (tmp = 0, i = 0x8000; i; i >>= 1) { - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); - if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio)) + TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); + if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio)) tmp |= i; - tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + TLan_SetBit(TLAN_NET_SIO_MCLK, sio); } } - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ - 
tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */ + TLan_SetBit(TLAN_NET_SIO_MCLK, sio); - if (minten) - tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); + if ( minten ) + TLan_SetBit(TLAN_NET_SIO_MINTEN, sio); *val = tmp; @@ -3035,117 +2942,116 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) return err; -} +} /* TLan_MiiReadReg */ -/*************************************************************** - * tlan_mii_send_data - * - * Returns: - * Nothing - * Parms: - * base_port The base IO port of the adapter in - * question. - * dev The address of the PHY to be queried. - * data The value to be placed on the MII bus. - * num_bits The number of bits in data that are to - * be placed on the MII bus. - * - * This function sends on sequence of bits on the MII - * configuration bus. - * - **************************************************************/ + /*************************************************************** + * TLan_MiiSendData + * + * Returns: + * Nothing + * Parms: + * base_port The base IO port of the adapter in + * question. + * dev The address of the PHY to be queried. + * data The value to be placed on the MII bus. + * num_bits The number of bits in data that are to + * be placed on the MII bus. + * + * This function sends on sequence of bits on the MII + * configuration bus. + * + **************************************************************/ -static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits) +static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) { u16 sio; u32 i; - if (num_bits == 0) + if ( num_bits == 0 ) return; - outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); + outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; - tlan_set_bit(TLAN_NET_SIO_MTXEN, sio); + TLan_SetBit( TLAN_NET_SIO_MTXEN, sio ); - for (i = (0x1 << (num_bits - 1)); i; i >>= 1) { - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); - (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); - if (data & i) - tlan_set_bit(TLAN_NET_SIO_MDATA, sio); + for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) { + TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); + (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); + if ( data & i ) + TLan_SetBit( TLAN_NET_SIO_MDATA, sio ); else - tlan_clear_bit(TLAN_NET_SIO_MDATA, sio); - tlan_set_bit(TLAN_NET_SIO_MCLK, sio); - (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); + TLan_ClearBit( TLAN_NET_SIO_MDATA, sio ); + TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); + (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); } -} +} /* TLan_MiiSendData */ -/*************************************************************** - * TLan_MiiSync - * - * Returns: - * Nothing - * Parms: - * base_port The base IO port of the adapter in - * question. - * - * This functions syncs all PHYs in terms of the MII configuration - * bus. - * - **************************************************************/ + /*************************************************************** + * TLan_MiiSync + * + * Returns: + * Nothing + * Parms: + * base_port The base IO port of the adapter in + * question. + * + * This functions syncs all PHYs in terms of the MII configuration + * bus. 
+ * + **************************************************************/ -static void tlan_mii_sync(u16 base_port) +static void TLan_MiiSync( u16 base_port ) { int i; u16 sio; - outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); + outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; - tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); - for (i = 0; i < 32; i++) { - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); - tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio ); + for ( i = 0; i < 32; i++ ) { + TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); + TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); } -} +} /* TLan_MiiSync */ -/*************************************************************** - * tlan_mii_write_reg - * - * Returns: - * Nothing - * Parms: - * dev The device structure for the device - * to write to. - * phy The address of the PHY to be written to. - * reg The register whose contents are to be - * written. - * val The value to be written to the register. - * - * This function uses the TLAN's MII bus to write the contents of a - * given register on a PHY. It sends the appropriate info and then - * writes the 16-bit register value from the MII configuration bus - * via the TLAN SIO register. - * - **************************************************************/ + /*************************************************************** + * TLan_MiiWriteReg + * + * Returns: + * Nothing + * Parms: + * dev The device structure for the device + * to write to. + * phy The address of the PHY to be written to. + * reg The register whose contents are to be + * written. + * val The value to be written to the register. + * + * This function uses the TLAN's MII bus to write the contents of a + * given register on a PHY. It sends the appropriate info and then + * writes the 16-bit register value from the MII configuration bus + * via the TLAN SIO register. 
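/*
 * Editorial aside, not part of the patch: the write path described above
 * differs from the read path only in the opcode and in the fact that the
 * host drives every bit, so the whole clause-22 frame can be composed up
 * front.  Hypothetical standalone sketch (name invented for the example)
 * of the layout TLan_MiiWriteReg()/tlan_mii_write_reg() emits piecewise:
 * start 01, opcode 01 (write), PHY address, register address, turnaround
 * 10, then the 16 data bits.
 */
#include <stdint.h>

uint32_t mdio_c22_write_frame(uint8_t phy, uint8_t reg, uint16_t val)
{
	uint32_t frame = 0;

	frame |= (uint32_t)0x1 << 30;		/* start: 01 */
	frame |= (uint32_t)0x1 << 28;		/* opcode: 01 = write */
	frame |= (uint32_t)(phy & 0x1f) << 23;	/* PHY address */
	frame |= (uint32_t)(reg & 0x1f) << 18;	/* register address */
	frame |= (uint32_t)0x2 << 16;		/* turnaround: 10 */
	frame |= val;				/* 16 data bits */
	return frame;				/* clocked out MSB first */
}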
+ * + **************************************************************/ -static void -tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) +static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) { u16 sio; int minten; unsigned long flags = 0; - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; @@ -3153,30 +3059,30 @@ tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) if (!in_irq()) spin_lock_irqsave(&priv->lock, flags); - tlan_mii_sync(dev->base_addr); + TLan_MiiSync( dev->base_addr ); - minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); - if (minten) - tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); + minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); + if ( minten ) + TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio ); - tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ - tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */ - tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ - tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ + TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ + TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */ + TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ + TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ - tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */ - tlan_mii_send_data(dev->base_addr, val, 16); /* send data */ + TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */ + TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */ - tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ - tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */ + TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); - if (minten) - tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); + if ( minten ) + TLan_SetBit( TLAN_NET_SIO_MINTEN, sio ); if (!in_irq()) spin_unlock_irqrestore(&priv->lock, flags); -} +} /* TLan_MiiWriteReg */ @@ -3184,226 +3090,229 @@ tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) /***************************************************************************** ****************************************************************************** -ThunderLAN driver eeprom routines + ThunderLAN Driver Eeprom routines -the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A -EEPROM. these functions are based on information in microchip's -data sheet. I don't know how well this functions will work with -other Eeproms. + The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A + EEPROM. These functions are based on information in Microchip's + data sheet. I don't know how well this functions will work with + other EEPROMs. ****************************************************************************** *****************************************************************************/ -/*************************************************************** - * tlan_ee_send_start - * - * Returns: - * Nothing - * Parms: - * io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * - * This function sends a start cycle to an EEPROM attached - * to a TLAN chip. 
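/*
 * Editorial aside, not part of the patch: the 24C02A talks an I2C-style
 * two-wire protocol, so the "start cycle" described above is simply a
 * falling edge on the data line while the clock is held high, and a stop
 * cycle is the matching rising edge.  A minimal sketch with hypothetical
 * line-control callbacks, mirroring the edge order the SIO bit helpers
 * drive (the ETXEN output-enable handling is omitted here):
 */
struct ee_lines {
	void (*set_clk)(void *ctx, int level);	/* serial clock */
	void (*set_data)(void *ctx, int level);	/* serial data */
	void *ctx;
};

static void ee_start(const struct ee_lines *l)
{
	l->set_clk(l->ctx, 1);
	l->set_data(l->ctx, 1);
	l->set_data(l->ctx, 0);	/* data falls while clock is high: START */
	l->set_clk(l->ctx, 0);
}

static void ee_stop(const struct ee_lines *l)
{
	l->set_data(l->ctx, 0);
	l->set_clk(l->ctx, 1);
	l->set_data(l->ctx, 1);	/* data rises while clock is high: STOP */
}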
- * - **************************************************************/ - -static void tlan_ee_send_start(u16 io_base) + /*************************************************************** + * TLan_EeSendStart + * + * Returns: + * Nothing + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * + * This function sends a start cycle to an EEPROM attached + * to a TLAN chip. + * + **************************************************************/ + +static void TLan_EeSendStart( u16 io_base ) { u16 sio; - outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); + outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - tlan_set_bit(TLAN_NET_SIO_EDATA, sio); - tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); - tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); - tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); - -} - - - - -/*************************************************************** - * tlan_ee_send_byte - * - * Returns: - * If the correct ack was received, 0, otherwise 1 - * Parms: io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * data The 8 bits of information to - * send to the EEPROM. - * stop If TLAN_EEPROM_STOP is passed, a - * stop cycle is sent after the - * byte is sent after the ack is - * read. - * - * This function sends a byte on the serial EEPROM line, - * driving the clock to send each bit. The function then - * reverses transmission direction and reads an acknowledge - * bit. - * - **************************************************************/ - -static int tlan_ee_send_byte(u16 io_base, u8 data, int stop) + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); + TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); + TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); + TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); + +} /* TLan_EeSendStart */ + + + + + /*************************************************************** + * TLan_EeSendByte + * + * Returns: + * If the correct ack was received, 0, otherwise 1 + * Parms: io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * data The 8 bits of information to + * send to the EEPROM. + * stop If TLAN_EEPROM_STOP is passed, a + * stop cycle is sent after the + * byte is sent after the ack is + * read. + * + * This function sends a byte on the serial EEPROM line, + * driving the clock to send each bit. The function then + * reverses transmission direction and reads an acknowledge + * bit. 
+ * + **************************************************************/ + +static int TLan_EeSendByte( u16 io_base, u8 data, int stop ) { int err; u8 place; u16 sio; - outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); + outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; /* Assume clock is low, tx is enabled; */ - for (place = 0x80; place != 0; place >>= 1) { - if (place & data) - tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + for ( place = 0x80; place != 0; place >>= 1 ) { + if ( place & data ) + TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); else - tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); } - tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio); - tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); - tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); + TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio ); + TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); + TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); - if ((!err) && stop) { + if ( ( ! err ) && stop ) { /* STOP, raise data while clock is high */ - tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); } return err; -} - - - - -/*************************************************************** - * tlan_ee_receive_byte - * - * Returns: - * Nothing - * Parms: - * io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * data An address to a char to hold the - * data sent from the EEPROM. - * stop If TLAN_EEPROM_STOP is passed, a - * stop cycle is sent after the - * byte is received, and no ack is - * sent. - * - * This function receives 8 bits of data from the EEPROM - * over the serial link. It then sends and ack bit, or no - * ack and a stop bit. This function is used to retrieve - * data after the address of a byte in the EEPROM has been - * sent. - * - **************************************************************/ - -static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop) +} /* TLan_EeSendByte */ + + + + + /*************************************************************** + * TLan_EeReceiveByte + * + * Returns: + * Nothing + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * data An address to a char to hold the + * data sent from the EEPROM. + * stop If TLAN_EEPROM_STOP is passed, a + * stop cycle is sent after the + * byte is received, and no ack is + * sent. + * + * This function receives 8 bits of data from the EEPROM + * over the serial link. It then sends and ack bit, or no + * ack and a stop bit. This function is used to retrieve + * data after the address of a byte in the EEPROM has been + * sent. 
+ * + **************************************************************/ + +static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) { u8 place; u16 sio; - outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); + outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; *data = 0; /* Assume clock is low, tx is enabled; */ - tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); - for (place = 0x80; place; place >>= 1) { - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio)) + TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); + for ( place = 0x80; place; place >>= 1 ) { + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) ) *data |= place; - tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); } - tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); - if (!stop) { - tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */ - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); + if ( ! stop ) { + TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */ + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); } else { - tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */ - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); /* STOP, raise data while clock is high */ - tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); - tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); - tlan_set_bit(TLAN_NET_SIO_EDATA, sio); - } - -} - - - - -/*************************************************************** - * tlan_ee_read_byte - * - * Returns: - * No error = 0, else, the stage at which the error - * occurred. - * Parms: - * io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * ee_addr The address of the byte in the - * EEPROM whose contents are to be - * retrieved. - * data An address to a char to hold the - * data obtained from the EEPROM. - * - * This function reads a byte of information from an byte - * cell in the EEPROM. - * - **************************************************************/ - -static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data) + TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); + TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); + TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); + } + +} /* TLan_EeReceiveByte */ + + + + + /*************************************************************** + * TLan_EeReadByte + * + * Returns: + * No error = 0, else, the stage at which the error + * occurred. + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * ee_addr The address of the byte in the + * EEPROM whose contents are to be + * retrieved. + * data An address to a char to hold the + * data obtained from the EEPROM. + * + * This function reads a byte of information from an byte + * cell in the EEPROM. 
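/*
 * Editorial aside, not part of the patch: tlan_ee_read_byte() below is the
 * 24C02 "random read" sequence from the Microchip data sheet.  Assuming
 * hypothetical helpers with the same contract as the SendStart/SendByte/
 * ReceiveByte routines above (send returns non-zero when no ACK is seen),
 * the transaction reads:
 */
#define EE_ACK	0	/* expect/send an acknowledge */
#define EE_STOP	1	/* finish with a stop cycle */

extern void ee_send_start(void);
extern int ee_send_byte(unsigned char data, int stop);
extern void ee_receive_byte(unsigned char *data, int stop);

static int ee_random_read(unsigned char addr, unsigned char *data)
{
	ee_send_start();
	if (ee_send_byte(0xa0, EE_ACK))		/* device select, write */
		return 1;
	if (ee_send_byte(addr, EE_ACK))		/* byte address in the EEPROM */
		return 2;
	ee_send_start();			/* repeated start */
	if (ee_send_byte(0xa1, EE_ACK))		/* device select, read */
		return 3;
	ee_receive_byte(data, EE_STOP);		/* data byte, no ack, stop */
	return 0;
}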
+ * + **************************************************************/ + +static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) { int err; - struct tlan_priv *priv = netdev_priv(dev); + TLanPrivateInfo *priv = netdev_priv(dev); unsigned long flags = 0; - int ret = 0; + int ret=0; spin_lock_irqsave(&priv->lock, flags); - tlan_ee_send_start(dev->base_addr); - err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK); - if (err) { - ret = 1; + TLan_EeSendStart( dev->base_addr ); + err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); + if (err) + { + ret=1; goto fail; } - err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK); - if (err) { - ret = 2; + err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); + if (err) + { + ret=2; goto fail; } - tlan_ee_send_start(dev->base_addr); - err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK); - if (err) { - ret = 3; + TLan_EeSendStart( dev->base_addr ); + err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); + if (err) + { + ret=3; goto fail; } - tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP); + TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP ); fail: spin_unlock_irqrestore(&priv->lock, flags); return ret; -} +} /* TLan_EeReadByte */ diff --git a/trunk/drivers/net/tlan.h b/trunk/drivers/net/tlan.h index 5fc98a8e4889..3315ced774e2 100644 --- a/trunk/drivers/net/tlan.h +++ b/trunk/drivers/net/tlan.h @@ -20,8 +20,8 @@ ********************************************************************/ -#include -#include +#include +#include #include @@ -40,11 +40,8 @@ #define TLAN_IGNORE 0 #define TLAN_RECORD 1 -#define TLAN_DBG(lvl, format, args...) \ - do { \ - if (debug&lvl) \ - printk(KERN_DEBUG "TLAN: " format, ##args); \ - } while (0) +#define TLAN_DBG(lvl, format, args...) 
\ + do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0) #define TLAN_DEBUG_GNRL 0x0001 #define TLAN_DEBUG_TX 0x0002 @@ -53,8 +50,7 @@ #define TLAN_DEBUG_PROBE 0x0010 #define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ -#define MAX_TLAN_BOARDS 8 /* Max number of boards installed - at a time */ +#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */ /***************************************************************** @@ -74,13 +70,13 @@ #define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 #endif -struct tlan_adapter_entry { - u16 vendor_id; - u16 device_id; - char *device_label; +typedef struct tlan_adapter_entry { + u16 vendorId; + u16 deviceId; + char *deviceLabel; u32 flags; - u16 addr_ofs; -}; + u16 addrOfs; +} TLanAdapterEntry; #define TLAN_ADAPTER_NONE 0x00000000 #define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 @@ -133,18 +129,18 @@ struct tlan_adapter_entry { #define TLAN_CSTAT_DP_PR 0x0100 -struct tlan_buffer { +typedef struct tlan_buffer_ref_tag { u32 count; u32 address; -}; +} TLanBufferRef; -struct tlan_list { +typedef struct tlan_list_tag { u32 forward; - u16 c_stat; - u16 frame_size; - struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST]; -}; + u16 cStat; + u16 frameSize; + TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST]; +} TLanList; typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; @@ -168,49 +164,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; * ****************************************************************/ -struct tlan_priv { - struct net_device *next_device; - struct pci_dev *pci_dev; +typedef struct tlan_private_tag { + struct net_device *nextDevice; + struct pci_dev *pciDev; struct net_device *dev; - void *dma_storage; - dma_addr_t dma_storage_dma; - unsigned int dma_size; - u8 *pad_buffer; - struct tlan_list *rx_list; - dma_addr_t rx_list_dma; - u8 *rx_buffer; - dma_addr_t rx_buffer_dma; - u32 rx_head; - u32 rx_tail; - u32 rx_eoc_count; - struct tlan_list *tx_list; - dma_addr_t tx_list_dma; - u8 *tx_buffer; - dma_addr_t tx_buffer_dma; - u32 tx_head; - u32 tx_in_progress; - u32 tx_tail; - u32 tx_busy_count; - u32 phy_online; - u32 timer_set_at; - u32 timer_type; + void *dmaStorage; + dma_addr_t dmaStorageDMA; + unsigned int dmaSize; + u8 *padBuffer; + TLanList *rxList; + dma_addr_t rxListDMA; + u8 *rxBuffer; + dma_addr_t rxBufferDMA; + u32 rxHead; + u32 rxTail; + u32 rxEocCount; + TLanList *txList; + dma_addr_t txListDMA; + u8 *txBuffer; + dma_addr_t txBufferDMA; + u32 txHead; + u32 txInProgress; + u32 txTail; + u32 txBusyCount; + u32 phyOnline; + u32 timerSetAt; + u32 timerType; struct timer_list timer; struct board *adapter; - u32 adapter_rev; + u32 adapterRev; u32 aui; u32 debug; u32 duplex; u32 phy[2]; - u32 phy_num; + u32 phyNum; u32 speed; - u8 tlan_rev; - u8 tlan_full_duplex; + u8 tlanRev; + u8 tlanFullDuplex; spinlock_t lock; u8 link; u8 is_eisa; struct work_struct tlan_tqueue; u8 neg_be_verbose; -}; +} TLanPrivateInfo; @@ -251,7 +247,7 @@ struct tlan_priv { ****************************************************************/ #define TLAN_HOST_CMD 0x00 -#define TLAN_HC_GO 0x80000000 +#define TLAN_HC_GO 0x80000000 #define TLAN_HC_STOP 0x40000000 #define TLAN_HC_ACK 0x20000000 #define TLAN_HC_CS_MASK 0x1FE00000 @@ -287,7 +283,7 @@ struct tlan_priv { #define TLAN_NET_CMD_TRFRAM 0x02 #define TLAN_NET_CMD_TXPACE 0x01 #define TLAN_NET_SIO 0x01 -#define TLAN_NET_SIO_MINTEN 0x80 +#define TLAN_NET_SIO_MINTEN 0x80 #define TLAN_NET_SIO_ECLOK 0x40 #define TLAN_NET_SIO_ETXEN 0x20 #define TLAN_NET_SIO_EDATA 0x10 @@ -308,7 +304,7 @@ struct tlan_priv { 
#define TLAN_NET_MASK_MASK4 0x10 #define TLAN_NET_MASK_RSRVD 0x0F #define TLAN_NET_CONFIG 0x04 -#define TLAN_NET_CFG_RCLK 0x8000 +#define TLAN_NET_CFG_RCLK 0x8000 #define TLAN_NET_CFG_TCLK 0x4000 #define TLAN_NET_CFG_BIT 0x2000 #define TLAN_NET_CFG_RXCRC 0x1000 @@ -376,7 +372,7 @@ struct tlan_priv { /* Generic MII/PHY Registers */ #define MII_GEN_CTL 0x00 -#define MII_GC_RESET 0x8000 +#define MII_GC_RESET 0x8000 #define MII_GC_LOOPBK 0x4000 #define MII_GC_SPEEDSEL 0x2000 #define MII_GC_AUTOENB 0x1000 @@ -401,9 +397,9 @@ struct tlan_priv { #define MII_GS_EXTCAP 0x0001 #define MII_GEN_ID_HI 0x02 #define MII_GEN_ID_LO 0x03 -#define MII_GIL_OUI 0xFC00 -#define MII_GIL_MODEL 0x03F0 -#define MII_GIL_REVISION 0x000F +#define MII_GIL_OUI 0xFC00 +#define MII_GIL_MODEL 0x03F0 +#define MII_GIL_REVISION 0x000F #define MII_AN_ADV 0x04 #define MII_AN_LPA 0x05 #define MII_AN_EXP 0x06 @@ -412,7 +408,7 @@ struct tlan_priv { #define TLAN_TLPHY_ID 0x10 #define TLAN_TLPHY_CTL 0x11 -#define TLAN_TC_IGLINK 0x8000 +#define TLAN_TC_IGLINK 0x8000 #define TLAN_TC_SWAPOL 0x4000 #define TLAN_TC_AUISEL 0x2000 #define TLAN_TC_SQEEN 0x1000 @@ -439,41 +435,41 @@ struct tlan_priv { #define LEVEL1_ID1 0x7810 #define LEVEL1_ID2 0x0000 -#define CIRC_INC(a, b) if (++a >= b) a = 0 +#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0 /* Routines to access internal registers. */ -static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr) +static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) { outw(internal_addr, base_addr + TLAN_DIO_ADR); return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); -} +} /* TLan_DioRead8 */ -static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr) +static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) { outw(internal_addr, base_addr + TLAN_DIO_ADR); return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); -} +} /* TLan_DioRead16 */ -static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr) +static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) { outw(internal_addr, base_addr + TLAN_DIO_ADR); return inl(base_addr + TLAN_DIO_DATA); -} +} /* TLan_DioRead32 */ -static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data) +static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data) { outw(internal_addr, base_addr + TLAN_DIO_ADR); outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); @@ -483,7 +479,7 @@ static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data) -static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data) +static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data) { outw(internal_addr, base_addr + TLAN_DIO_ADR); outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); @@ -493,16 +489,16 @@ static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data) -static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data) +static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data) { outw(internal_addr, base_addr + TLAN_DIO_ADR); outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); } -#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port) -#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit)) -#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port) +#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port) +#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit)) +#define TLan_SetBit( bit, port ) outb_p(inb_p(port) 
| bit, port) /* * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those @@ -510,37 +506,37 @@ static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data) * * The original code was: * - * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); } + * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); } * - * #define XOR8(a, b, c, d, e, f, g, h) \ - * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) ) - * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) ) + * #define XOR8( a, b, c, d, e, f, g, h ) \ + * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) + * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) * - * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), - * DA(a,30), DA(a,36), DA(a,42)); - * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), - * DA(a,31), DA(a,37), DA(a,43)) << 1; - * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), - * DA(a,32), DA(a,38), DA(a,44)) << 2; - * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), - * DA(a,33), DA(a,39), DA(a,45)) << 3; - * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), - * DA(a,34), DA(a,40), DA(a,46)) << 4; - * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), - * DA(a,35), DA(a,41), DA(a,47)) << 5; + * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), + * DA(a,30), DA(a,36), DA(a,42) ); + * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), + * DA(a,31), DA(a,37), DA(a,43) ) << 1; + * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), + * DA(a,32), DA(a,38), DA(a,44) ) << 2; + * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), + * DA(a,33), DA(a,39), DA(a,45) ) << 3; + * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), + * DA(a,34), DA(a,40), DA(a,46) ) << 4; + * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), + * DA(a,35), DA(a,41), DA(a,47) ) << 5; * */ -static inline u32 tlan_hash_func(const u8 *a) +static inline u32 TLan_HashFunc( const u8 *a ) { - u8 hash; + u8 hash; - hash = (a[0]^a[3]); /* & 077 */ - hash ^= ((a[0]^a[3])>>6); /* & 003 */ - hash ^= ((a[1]^a[4])<<2); /* & 074 */ - hash ^= ((a[1]^a[4])>>4); /* & 017 */ - hash ^= ((a[2]^a[5])<<4); /* & 060 */ - hash ^= ((a[2]^a[5])>>2); /* & 077 */ + hash = (a[0]^a[3]); /* & 077 */ + hash ^= ((a[0]^a[3])>>6); /* & 003 */ + hash ^= ((a[1]^a[4])<<2); /* & 074 */ + hash ^= ((a[1]^a[4])>>4); /* & 017 */ + hash ^= ((a[2]^a[5])<<4); /* & 060 */ + hash ^= ((a[2]^a[5])>>2); /* & 077 */ - return hash & 077; + return hash & 077; } #endif diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c index 55786a0efc41..b100bd50a0d7 100644 --- a/trunk/drivers/net/tun.c +++ b/trunk/drivers/net/tun.c @@ -1142,7 +1142,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun, * privs required. */ static int set_offload(struct net_device *dev, unsigned long arg) { - u32 old_features, features; + unsigned int old_features, features; old_features = dev->features; /* Unset features, set them as we chew on the arg. 
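/*
 * Editorial aside, not part of the patch (refers back to the multicast
 * hash in the tlan.h hunk above): tlan_hash_func()/TLan_HashFunc() is the
 * XOR of the destination address viewed as eight 6-bit groups, and the
 * folded form is equivalent to the bit-by-bit XOR8/DA definition quoted
 * in that comment block.  A hypothetical standalone cross-check:
 */
#include <stdio.h>

static unsigned int hash_by_definition(const unsigned char *a)
{
	unsigned int hash = 0;
	int bit;

	for (bit = 0; bit < 48; bit++)	/* 48 bits = 8 groups of 6 */
		hash ^= ((a[bit / 8] >> (bit % 8)) & 1) << (bit % 6);
	return hash & 077;
}

static unsigned int hash_by_folding(const unsigned char *a)
{
	unsigned int hash;

	hash  = (a[0] ^ a[3]);		/* bytes 0 and 3 share group phase 0 */
	hash ^= (a[0] ^ a[3]) >> 6;
	hash ^= (a[1] ^ a[4]) << 2;	/* bytes 1 and 4 share group phase 2 */
	hash ^= (a[1] ^ a[4]) >> 4;
	hash ^= (a[2] ^ a[5]) << 4;	/* bytes 2 and 5 share group phase 4 */
	hash ^= (a[2] ^ a[5]) >> 2;
	return hash & 077;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56 };

	printf("definition=%02o folded=%02o\n",
	       hash_by_definition(mac), hash_by_folding(mac));
	return 0;
}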
*/ diff --git a/trunk/drivers/net/typhoon.c b/trunk/drivers/net/typhoon.c index 7fa5ec2de942..a3c46f6a15e7 100644 --- a/trunk/drivers/net/typhoon.c +++ b/trunk/drivers/net/typhoon.c @@ -123,11 +123,12 @@ static const int multicast_filter_limit = 32; #include #include #include +#include #include "typhoon.h" MODULE_AUTHOR("David Dillow "); -MODULE_VERSION("1.0"); +MODULE_VERSION(UTS_RELEASE); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_NAME); MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); diff --git a/trunk/drivers/net/usb/kaweth.c b/trunk/drivers/net/usb/kaweth.c index 7dc84971f26f..5e98643a4a21 100644 --- a/trunk/drivers/net/usb/kaweth.c +++ b/trunk/drivers/net/usb/kaweth.c @@ -406,7 +406,6 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth, if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { err("Firmware too big: %zu", fw->size); - release_firmware(fw); return -ENOSPC; } data_len = fw->size; diff --git a/trunk/drivers/net/veth.c b/trunk/drivers/net/veth.c index 105d7f0630cc..cc83fa71c3ff 100644 --- a/trunk/drivers/net/veth.c +++ b/trunk/drivers/net/veth.c @@ -403,6 +403,17 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (tb[IFLA_ADDRESS] == NULL) random_ether_addr(dev->dev_addr); + if (tb[IFLA_IFNAME]) + nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + else + snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); + + if (strchr(dev->name, '%')) { + err = dev_alloc_name(dev, dev->name); + if (err < 0) + goto err_alloc_name; + } + err = register_netdevice(dev); if (err < 0) goto err_register_dev; @@ -422,6 +433,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, err_register_dev: /* nothing to do */ +err_alloc_name: err_configure_peer: unregister_netdevice(peer); return err; diff --git a/trunk/drivers/net/via-velocity.c b/trunk/drivers/net/via-velocity.c index 0d6fec6b7d93..09cac704fdd7 100644 --- a/trunk/drivers/net/via-velocity.c +++ b/trunk/drivers/net/via-velocity.c @@ -2923,7 +2923,6 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern) static int velocity_set_wol(struct velocity_info *vptr) { struct mac_regs __iomem *regs = vptr->mac_regs; - enum speed_opt spd_dpx = vptr->options.spd_dpx; static u8 buf[256]; int i; @@ -2969,12 +2968,6 @@ static int velocity_set_wol(struct velocity_info *vptr) writew(0x0FFF, ®s->WOLSRClr); - if (spd_dpx == SPD_DPX_1000_FULL) - goto mac_done; - - if (spd_dpx != SPD_DPX_AUTO) - goto advertise_done; - if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); @@ -2985,7 +2978,6 @@ static int velocity_set_wol(struct velocity_info *vptr) if (vptr->mii_status & VELOCITY_SPEED_1000) MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); -advertise_done: BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); { @@ -2995,7 +2987,6 @@ static int velocity_set_wol(struct velocity_info *vptr) writeb(GCR, ®s->CHIPGCR); } -mac_done: BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); /* Turn on SWPTAG just before entering power mode */ BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); diff --git a/trunk/drivers/net/via-velocity.h b/trunk/drivers/net/via-velocity.h index d7227539484e..aa2e69b9ff61 100644 --- a/trunk/drivers/net/via-velocity.h +++ b/trunk/drivers/net/via-velocity.h @@ -361,7 +361,7 @@ enum velocity_owner { #define MAC_REG_CHIPGSR 0x9C #define MAC_REG_TESTCFG 0x9D #define MAC_REG_DEBUG 0x9E -#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */ +#define 
MAC_REG_CHIPGCR 0x9F #define MAC_REG_WOLCR0_SET 0xA0 #define MAC_REG_WOLCR1_SET 0xA1 #define MAC_REG_PWCFG_SET 0xA2 @@ -848,10 +848,10 @@ enum velocity_owner { * Bits in CHIPGCR register */ -#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */ -#define CHIPGCR_FCFDX 0x40 /* force full duplex */ +#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */ +#define CHIPGCR_FCFDX 0x40 #define CHIPGCR_FCRESV 0x20 -#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */ +#define CHIPGCR_FCMODE 0x10 #define CHIPGCR_LPSOPT 0x08 #define CHIPGCR_TM1US 0x04 #define CHIPGCR_TM0US 0x02 diff --git a/trunk/drivers/net/vxge/vxge-config.c b/trunk/drivers/net/vxge/vxge-config.c index 77097e383cf4..01c05f53e2f9 100644 --- a/trunk/drivers/net/vxge/vxge-config.c +++ b/trunk/drivers/net/vxge/vxge-config.c @@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, data1 = steer_ctrl = 0; status = vxge_hw_vpath_fw_api(vpath, - VXGE_HW_FW_API_GET_EPROM_REV, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + VXGE_HW_FW_API_GET_EPROM_REV, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) break; @@ -2868,8 +2868,6 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, ring->rxd_init = attr->rxd_init; ring->rxd_term = attr->rxd_term; ring->buffer_mode = config->buffer_mode; - ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved; - ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved; ring->rxds_limit = config->rxds_limit; ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); @@ -3513,8 +3511,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, /* apply "interrupts per txdl" attribute */ fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; - fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved; - fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved; if (fifo->config->intr) fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; @@ -4381,8 +4377,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); - vpath->tim_tti_cfg1_saved = val64; - val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { @@ -4439,7 +4433,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); - vpath->tim_tti_cfg3_saved = val64; } if (config->ring.enable == VXGE_HW_RING_ENABLE) { @@ -4488,8 +4481,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); - vpath->tim_rti_cfg1_saved = val64; - val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { @@ -4546,7 +4537,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); - vpath->tim_rti_cfg3_saved = val64; } val64 = 0; @@ -4565,6 +4555,26 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) return status; } +void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) +{ + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct vxge_hw_vp_config *config; + u64 val64; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + config = vpath->vp_config; + + if (config->fifo.enable == VXGE_HW_FIFO_ENABLE && + config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { + 
config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; + val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + } +} + /* * __vxge_hw_vpath_initialize * This routine is the final phase of init which initializes the diff --git a/trunk/drivers/net/vxge/vxge-config.h b/trunk/drivers/net/vxge/vxge-config.h index 3c53aa732c9d..e249e288d160 100644 --- a/trunk/drivers/net/vxge/vxge-config.h +++ b/trunk/drivers/net/vxge/vxge-config.h @@ -682,10 +682,6 @@ struct __vxge_hw_virtualpath { u32 vsport_number; u32 max_kdfc_db; u32 max_nofl_db; - u64 tim_tti_cfg1_saved; - u64 tim_tti_cfg3_saved; - u64 tim_rti_cfg1_saved; - u64 tim_rti_cfg3_saved; struct __vxge_hw_ring *____cacheline_aligned ringh; struct __vxge_hw_fifo *____cacheline_aligned fifoh; @@ -925,9 +921,6 @@ struct __vxge_hw_ring { u32 doorbell_cnt; u32 total_db_cnt; u64 rxds_limit; - u32 rtimer; - u64 tim_rti_cfg1_saved; - u64 tim_rti_cfg3_saved; enum vxge_hw_status (*callback)( struct __vxge_hw_ring *ringh, @@ -1007,9 +1000,6 @@ struct __vxge_hw_fifo { u32 per_txdl_space; u32 vp_id; u32 tx_intr_num; - u32 rtimer; - u64 tim_tti_cfg1_saved; - u64 tim_tti_cfg3_saved; enum vxge_hw_status (*callback)( struct __vxge_hw_fifo *fifo_handle, diff --git a/trunk/drivers/net/vxge/vxge-main.c b/trunk/drivers/net/vxge/vxge-main.c index e40f619b62b1..c81a6512c683 100644 --- a/trunk/drivers/net/vxge/vxge-main.c +++ b/trunk/drivers/net/vxge/vxge-main.c @@ -371,6 +371,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, struct vxge_hw_ring_rxd_info ext_info; vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", ring->ndev->name, __func__, __LINE__); + ring->pkts_processed = 0; + + vxge_hw_ring_replenish(ringh); do { prefetch((char *)dtr + L1_CACHE_BYTES); @@ -1585,36 +1588,6 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) return ret; } -/* Configure CI */ -static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev) -{ - int i = 0; - - /* Enable CI for RTI */ - if (vdev->config.intr_type == MSI_X) { - for (i = 0; i < vdev->no_of_vpath; i++) { - struct __vxge_hw_ring *hw_ring; - - hw_ring = vdev->vpaths[i].ring.handle; - vxge_hw_vpath_dynamic_rti_ci_set(hw_ring); - } - } - - /* Enable CI for TTI */ - for (i = 0; i < vdev->no_of_vpath; i++) { - struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle; - vxge_hw_vpath_tti_ci_set(hw_fifo); - /* - * For Inta (with or without napi), Set CI ON for only one - * vpath. (Have only one free running timer). 
- */ - if ((vdev->config.intr_type == INTA) && (i == 0)) - break; - } - - return; -} - static int do_vxge_reset(struct vxgedev *vdev, int event) { enum vxge_hw_status status; @@ -1780,9 +1753,6 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) netif_tx_wake_all_queues(vdev->ndev); } - /* configure CI */ - vxge_config_ci_for_tti_rti(vdev); - out: vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); @@ -1823,29 +1793,22 @@ static void vxge_reset(struct work_struct *work) */ static int vxge_poll_msix(struct napi_struct *napi, int budget) { - struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); - int pkts_processed; + struct vxge_ring *ring = + container_of(napi, struct vxge_ring, napi); int budget_org = budget; - ring->budget = budget; - ring->pkts_processed = 0; + vxge_hw_vpath_poll_rx(ring->handle); - pkts_processed = ring->pkts_processed; if (ring->pkts_processed < budget_org) { napi_complete(napi); - /* Re enable the Rx interrupts for the vpath */ vxge_hw_channel_msix_unmask( (struct __vxge_hw_channel *)ring->handle, ring->rx_vector_no); - mmiowb(); } - /* We are copying and returning the local variable, in case if after - * clearing the msix interrupt above, if the interrupt fires right - * away which can preempt this NAPI thread */ - return pkts_processed; + return ring->pkts_processed; } static int vxge_poll_inta(struct napi_struct *napi, int budget) @@ -1861,7 +1824,6 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) for (i = 0; i < vdev->no_of_vpath; i++) { ring = &vdev->vpaths[i].ring; ring->budget = budget; - ring->pkts_processed = 0; vxge_hw_vpath_poll_rx(ring->handle); pkts_processed += ring->pkts_processed; budget -= ring->pkts_processed; @@ -2092,7 +2054,6 @@ static int vxge_open_vpaths(struct vxgedev *vdev) netdev_get_tx_queue(vdev->ndev, 0); vpath->fifo.indicate_max_pkts = vdev->config.fifo_indicate_max_pkts; - vpath->fifo.tx_vector_no = 0; vpath->ring.rx_vector_no = 0; vpath->ring.rx_csum = vdev->rx_csum; vpath->ring.rx_hwts = vdev->rx_hwts; @@ -2118,61 +2079,6 @@ static int vxge_open_vpaths(struct vxgedev *vdev) return VXGE_HW_OK; } -/** - * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing - * if the interrupts are not within a range - * @fifo: pointer to transmit fifo structure - * Description: The function changes boundary timer and restriction timer - * value depends on the traffic - * Return Value: None - */ -static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) -{ - fifo->interrupt_count++; - if (jiffies > fifo->jiffies + HZ / 100) { - struct __vxge_hw_fifo *hw_fifo = fifo->handle; - - fifo->jiffies = jiffies; - if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT && - hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) { - hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL; - vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); - } else if (hw_fifo->rtimer != 0) { - hw_fifo->rtimer = 0; - vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); - } - fifo->interrupt_count = 0; - } -} - -/** - * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing - * if the interrupts are not within a range - * @ring: pointer to receive ring structure - * Description: The function increases of decreases the packet counts within - * the ranges of traffic utilization, if the interrupts due to this ring are - * not within a fixed range. 
- * Return Value: Nothing - */ -static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) -{ - ring->interrupt_count++; - if (jiffies > ring->jiffies + HZ / 100) { - struct __vxge_hw_ring *hw_ring = ring->handle; - - ring->jiffies = jiffies; - if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && - hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) { - hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL; - vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); - } else if (hw_ring->rtimer != 0) { - hw_ring->rtimer = 0; - vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); - } - ring->interrupt_count = 0; - } -} - /* * vxge_isr_napi * @irq: the irq of the device. @@ -2233,39 +2139,24 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) #ifdef CONFIG_PCI_MSI -static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) +static irqreturn_t +vxge_tx_msix_handle(int irq, void *dev_id) { struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; - adaptive_coalesce_tx_interrupts(fifo); - - vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle, - fifo->tx_vector_no); - - vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle, - fifo->tx_vector_no); - VXGE_COMPLETE_VPATH_TX(fifo); - vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, - fifo->tx_vector_no); - - mmiowb(); - return IRQ_HANDLED; } -static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id) +static irqreturn_t +vxge_rx_msix_napi_handle(int irq, void *dev_id) { struct vxge_ring *ring = (struct vxge_ring *)dev_id; - adaptive_coalesce_rx_interrupts(ring); - + /* MSIX_IDX for Rx is 1 */ vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, - ring->rx_vector_no); - - vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, - ring->rx_vector_no); + ring->rx_vector_no); napi_schedule(&ring->napi); return IRQ_HANDLED; @@ -2282,20 +2173,14 @@ vxge_alarm_msix_handle(int irq, void *dev_id) VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; for (i = 0; i < vdev->no_of_vpath; i++) { - /* Reduce the chance of loosing alarm interrupts by masking - * the vector. A pending bit will be set if an alarm is - * generated and on unmask the interrupt will be fired. 
- */ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); - vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); - mmiowb(); status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, vdev->exec_mode); if (status == VXGE_HW_OK) { + vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, - msix_id); - mmiowb(); + msix_id); continue; } vxge_debug_intr(VXGE_ERR, @@ -2414,9 +2299,6 @@ static int vxge_enable_msix(struct vxgedev *vdev) vpath->ring.rx_vector_no = (vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE) + 1; - vpath->fifo.tx_vector_no = (vpath->device_id * - VXGE_HW_VPATH_MSIX_ACTIVE); - vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, VXGE_ALARM_MSIX_ID); } @@ -2592,9 +2474,8 @@ static int vxge_add_isr(struct vxgedev *vdev) "%s:vxge:INTA", vdev->ndev->name); vxge_hw_device_set_intr_type(vdev->devh, VXGE_HW_INTR_MODE_IRQLINE); - - vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle); - + vxge_hw_vpath_tti_ci_set(vdev->devh, + vdev->vpaths[0].device_id); ret = request_irq((int) vdev->pdev->irq, vxge_isr_napi, IRQF_SHARED, vdev->desc[0], vdev); @@ -2864,10 +2745,6 @@ static int vxge_open(struct net_device *dev) } netif_tx_start_all_queues(vdev->ndev); - - /* configure CI */ - vxge_config_ci_for_tti_rti(vdev); - goto out0; out2: @@ -3471,7 +3348,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, vxge_debug_init(VXGE_ERR, "%s: vpath memory allocation failed", vdev->ndev->name); - ret = -ENOMEM; + ret = -ENODEV; goto _out1; } @@ -3492,11 +3369,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, if (vdev->config.gro_enable) ndev->features |= NETIF_F_GRO; - ret = register_netdev(ndev); - if (ret) { + if (register_netdev(ndev)) { vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s: %s : device registration failed!", ndev->name, __func__); + ret = -ENODEV; goto _out2; } @@ -3567,11 +3444,6 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev) /* in 2.6 will call stop() if device is up */ unregister_netdev(dev); - kfree(vdev->vpaths); - - /* we are safe to free it now */ - free_netdev(dev); - vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", buf); vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, @@ -3927,7 +3799,7 @@ static void __devinit vxge_device_config_init( break; case MSI_X: - device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT; + device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; break; } @@ -4463,10 +4335,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit1; } - ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME); - if (ret) { + if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) { vxge_debug_init(VXGE_ERR, "%s : request regions failed", __func__); + ret = -ENODEV; goto _exit1; } @@ -4574,7 +4446,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) if (!img[i].is_valid) break; vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " - "%d.%d.%d.%d", VXGE_DRIVER_NAME, i, + "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i, VXGE_EPROM_IMG_MAJOR(img[i].version), VXGE_EPROM_IMG_MINOR(img[i].version), VXGE_EPROM_IMG_FIX(img[i].version), @@ -4771,9 +4643,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) _exit5: vxge_device_unregister(hldev); _exit4: - pci_set_drvdata(pdev, NULL); - vxge_hw_device_terminate(hldev); pci_disable_sriov(pdev); + vxge_hw_device_terminate(hldev); _exit3: iounmap(attr.bar0); _exit2: @@ -4784,7 +4655,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) kfree(ll_config); 
kfree(device_config); driver_config->config_dev_cnt--; - driver_config->total_dev_cnt--; + pci_set_drvdata(pdev, NULL); return ret; } @@ -4797,34 +4668,45 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) static void __devexit vxge_remove(struct pci_dev *pdev) { struct __vxge_hw_device *hldev; - struct vxgedev *vdev; - int i; + struct vxgedev *vdev = NULL; + struct net_device *dev; + int i = 0; hldev = pci_get_drvdata(pdev); + if (hldev == NULL) return; - vdev = netdev_priv(hldev->ndev); + dev = hldev->ndev; + vdev = netdev_priv(dev); vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); + vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", __func__); + vxge_device_unregister(hldev); - for (i = 0; i < vdev->no_of_vpath; i++) + for (i = 0; i < vdev->no_of_vpath; i++) { vxge_free_mac_add_list(&vdev->vpaths[i]); + vdev->vpaths[i].mcast_addr_cnt = 0; + vdev->vpaths[i].mac_addr_cnt = 0; + } + + kfree(vdev->vpaths); - vxge_device_unregister(hldev); - pci_set_drvdata(pdev, NULL); - /* Do not call pci_disable_sriov here, as it will break child devices */ - vxge_hw_device_terminate(hldev); iounmap(vdev->bar0); - pci_release_region(pdev, 0); - pci_disable_device(pdev); - driver_config->config_dev_cnt--; - driver_config->total_dev_cnt--; + + /* we are safe to free it now */ + free_netdev(dev); vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", __func__, __LINE__); + + vxge_hw_device_terminate(hldev); + + pci_disable_device(pdev); + pci_release_region(pdev, 0); + pci_set_drvdata(pdev, NULL); vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, __LINE__); } diff --git a/trunk/drivers/net/vxge/vxge-main.h b/trunk/drivers/net/vxge/vxge-main.h index 40474f0da576..5746fedc356f 100644 --- a/trunk/drivers/net/vxge/vxge-main.h +++ b/trunk/drivers/net/vxge/vxge-main.h @@ -59,13 +59,11 @@ #define VXGE_TTI_LTIMER_VAL 1000 #define VXGE_T1A_TTI_LTIMER_VAL 80 #define VXGE_TTI_RTIMER_VAL 0 -#define VXGE_TTI_RTIMER_ADAPT_VAL 10 #define VXGE_T1A_TTI_RTIMER_VAL 400 #define VXGE_RTI_BTIMER_VAL 250 #define VXGE_RTI_LTIMER_VAL 100 #define VXGE_RTI_RTIMER_VAL 0 -#define VXGE_RTI_RTIMER_ADAPT_VAL 15 -#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH +#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH #define VXGE_ISR_POLLING_CNT 8 #define VXGE_MAX_CONFIG_DEV 0xFF #define VXGE_EXEC_MODE_DISABLE 0 @@ -109,14 +107,6 @@ #define RTI_T1A_RX_UFC_C 50 #define RTI_T1A_RX_UFC_D 60 -/* - * The interrupt rate is maintained at 3k per second with the moderation - * parameters for most traffic but not all. This is the maximum interrupt - * count allowed per function with INTA or per vector in the case of - * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A. 
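/*
 * Editorial aside, not part of the patch: the adaptive_coalesce_{tx,rx}_
 * interrupts() helpers and the VXGE_T1A_MAX_*_INTERRUPT_COUNT limits being
 * removed here count interrupts in 10 ms windows and reprogram the
 * vector's restriction timer when the count leaves the budget.  A
 * device-independent sketch of that idea (hypothetical names; the caller
 * supplies the hook that actually writes the timer register):
 */
struct irq_moderation {
	unsigned long window_start;	/* jiffies-style timestamp */
	unsigned long count;		/* interrupts seen in this window */
	unsigned long budget;		/* e.g. 100 rx or 200 tx per 10 ms */
	unsigned int rtimer;		/* currently programmed value */
	unsigned int rtimer_adapt;	/* value to use when over budget */
	void (*apply)(void *ctx, unsigned int rtimer);
	void *ctx;
};

static void irq_moderate(struct irq_moderation *m, unsigned long now,
			 unsigned long window_len)
{
	m->count++;
	if (now - m->window_start < window_len)
		return;

	m->window_start = now;
	if (m->count > m->budget && m->rtimer != m->rtimer_adapt) {
		m->rtimer = m->rtimer_adapt;	/* slow the interrupt rate */
		m->apply(m->ctx, m->rtimer);
	} else if (m->count <= m->budget && m->rtimer != 0) {
		m->rtimer = 0;			/* traffic dropped off again */
		m->apply(m->ctx, m->rtimer);
	}
	m->count = 0;
}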
- */ -#define VXGE_T1A_MAX_INTERRUPT_COUNT 100 -#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200 /* Milli secs timer period */ #define VXGE_TIMER_DELAY 10000 @@ -257,11 +247,6 @@ struct vxge_fifo { int tx_steering_type; int indicate_max_pkts; - /* Adaptive interrupt moderation parameters used in T1A */ - unsigned long interrupt_count; - unsigned long jiffies; - - u32 tx_vector_no; /* Tx stats */ struct vxge_fifo_stats stats; } ____cacheline_aligned; @@ -286,10 +271,6 @@ struct vxge_ring { */ int driver_id; - /* Adaptive interrupt moderation parameters used in T1A */ - unsigned long interrupt_count; - unsigned long jiffies; - /* copy of the flag indicating whether rx_csum is to be used */ u32 rx_csum:1, rx_hwts:1; @@ -305,7 +286,7 @@ struct vxge_ring { int vlan_tag_strip; struct vlan_group *vlgrp; - u32 rx_vector_no; + int rx_vector_no; enum vxge_hw_status last_status; /* Rx stats */ diff --git a/trunk/drivers/net/vxge/vxge-traffic.c b/trunk/drivers/net/vxge/vxge-traffic.c index 8674f331311c..4c10d6c4075f 100644 --- a/trunk/drivers/net/vxge/vxge-traffic.c +++ b/trunk/drivers/net/vxge/vxge-traffic.c @@ -218,68 +218,6 @@ enum vxge_hw_status vxge_hw_vpath_intr_disable( return status; } -void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo) -{ - struct vxge_hw_vpath_reg __iomem *vp_reg; - struct vxge_hw_vp_config *config; - u64 val64; - - if (fifo->config->enable != VXGE_HW_FIFO_ENABLE) - return; - - vp_reg = fifo->vp_reg; - config = container_of(fifo->config, struct vxge_hw_vp_config, fifo); - - if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { - config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; - val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); - val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; - fifo->tim_tti_cfg1_saved = val64; - writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); - } -} - -void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring) -{ - u64 val64 = ring->tim_rti_cfg1_saved; - - val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; - ring->tim_rti_cfg1_saved = val64; - writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); -} - -void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo) -{ - u64 val64 = fifo->tim_tti_cfg3_saved; - u64 timer = (fifo->rtimer * 1000) / 272; - - val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); - if (timer) - val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | - VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5); - - writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); - /* tti_cfg3_saved is not updated again because it is - * initialized at one place only - init time. - */ -} - -void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring) -{ - u64 val64 = ring->tim_rti_cfg3_saved; - u64 timer = (ring->rtimer * 1000) / 272; - - val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); - if (timer) - val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | - VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4); - - writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); - /* rti_cfg3_saved is not updated again because it is - * initialized at one place only - init time. - */ -} - /** * vxge_hw_channel_msix_mask - Mask MSIX Vector. * @channeh: Channel for rx or tx handle @@ -315,23 +253,6 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id) &channel->common_reg->clear_msix_mask_vect[msix_id%4]); } -/** - * vxge_hw_channel_msix_clear - Unmask the MSIX Vector. 
- * @channel: Channel for rx or tx handle - * @msix_id: MSI ID - * - * The function unmasks the msix interrupt for the given msix_id - * if configured in MSIX oneshot mode - * - * Returns: 0 - */ -void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id) -{ - __vxge_hw_pio_mem_write32_upper( - (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), - &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]); -} - /** * vxge_hw_device_set_intr_type - Updates the configuration * with new interrupt type. @@ -2269,15 +2190,20 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, if (vpath->hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { - __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( - VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN, - 0, 32), &vp_reg->one_shot_vect0_en); __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, 0, 32), &vp_reg->one_shot_vect1_en); + } + + if (vpath->hldev->config.intr_mode == + VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, 0, 32), &vp_reg->one_shot_vect2_en); + + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( + VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN, + 0, 32), &vp_reg->one_shot_vect3_en); } } @@ -2302,32 +2228,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id) &hldev->common_reg->set_msix_mask_vect[msix_id % 4]); } -/** - * vxge_hw_vpath_msix_clear - Clear MSIX Vector. - * @vp: Virtual Path handle. - * @msix_id: MSI ID - * - * The function clears the msix interrupt for the given msix_id - * - * Returns: 0, - * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range - * status. - * See also: - */ -void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) -{ - struct __vxge_hw_device *hldev = vp->vpath->hldev; - - if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)) - __vxge_hw_pio_mem_write32_upper( - (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), - &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]); - else - __vxge_hw_pio_mem_write32_upper( - (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), - &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]); -} - /** * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. * @vp: Virtual Path handle. 
diff --git a/trunk/drivers/net/vxge/vxge-traffic.h b/trunk/drivers/net/vxge/vxge-traffic.h index 9d9dfda4c7ab..d48486d6afa1 100644 --- a/trunk/drivers/net/vxge/vxge-traffic.h +++ b/trunk/drivers/net/vxge/vxge-traffic.h @@ -2142,10 +2142,6 @@ void vxge_hw_device_clear_tx_rx( * Virtual Paths */ -void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring); - -void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo); - u32 vxge_hw_vpath_id( struct __vxge_hw_vpath_handle *vpath_handle); @@ -2249,8 +2245,6 @@ void vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, int msix_id); -void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id); - void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); void @@ -2275,9 +2269,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id); void vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); -void -vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id); - void vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh); @@ -2291,8 +2282,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh); int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); -void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo); - -void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring); +void +vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); #endif diff --git a/trunk/drivers/net/vxge/vxge-version.h b/trunk/drivers/net/vxge/vxge-version.h index 581e21525e85..ad2f99b9bcf3 100644 --- a/trunk/drivers/net/vxge/vxge-version.h +++ b/trunk/drivers/net/vxge/vxge-version.h @@ -16,8 +16,8 @@ #define VXGE_VERSION_MAJOR "2" #define VXGE_VERSION_MINOR "5" -#define VXGE_VERSION_FIX "2" -#define VXGE_VERSION_BUILD "22259" +#define VXGE_VERSION_FIX "1" +#define VXGE_VERSION_BUILD "22082" #define VXGE_VERSION_FOR "k" #define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) diff --git a/trunk/drivers/net/wireless/ath/ar9170/main.c b/trunk/drivers/net/wireless/ath/ar9170/main.c index a9111e1161fd..32bf79e6a320 100644 --- a/trunk/drivers/net/wireless/ath/ar9170/main.c +++ b/trunk/drivers/net/wireless/ath/ar9170/main.c @@ -1945,8 +1945,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, static int ar9170_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u16 *ssn) { switch (action) { case IEEE80211_AMPDU_RX_START: diff --git a/trunk/drivers/net/wireless/ath/ath.h b/trunk/drivers/net/wireless/ath/ath.h index a6c6a466000f..e43210c8585c 100644 --- a/trunk/drivers/net/wireless/ath/ath.h +++ b/trunk/drivers/net/wireless/ath/ath.h @@ -108,14 +108,12 @@ enum ath_cipher { * struct ath_ops - Register read/write operations * * @read: Register read - * @multi_read: Multiple register read * @write: Register write * @enable_write_buffer: Enable multiple register writes * @write_flush: flush buffered register writes and disable buffering */ struct ath_ops { unsigned int (*read)(void *, u32 reg_offset); - void (*multi_read)(void *, u32 *addr, u32 *val, u16 count); void (*write)(void *, u32 val, u32 reg_offset); void (*enable_write_buffer)(void *); void (*write_flush) (void *); diff --git a/trunk/drivers/net/wireless/ath/ath5k/ahb.c b/trunk/drivers/net/wireless/ath/ath5k/ahb.c index ae84b86c3bf2..707cde149248 100644 
--- a/trunk/drivers/net/wireless/ath/ath5k/ahb.c +++ b/trunk/drivers/net/wireless/ath/ath5k/ahb.c @@ -31,8 +31,7 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz) *csz = L1_CACHE_BYTES >> 2; } -static bool -ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) +bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) { struct ath5k_softc *sc = common->priv; struct platform_device *pdev = to_platform_device(sc->dev); @@ -47,10 +46,10 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) eeprom += off; if (eeprom > eeprom_end) - return false; + return -EINVAL; *data = *eeprom; - return true; + return 0; } int ath5k_hw_read_srev(struct ath5k_hw *ah) diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.c b/trunk/drivers/net/wireless/ath/ath5k/base.c index dae0bdcef257..09ae4ef0fd51 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/base.c +++ b/trunk/drivers/net/wireless/ath/ath5k/base.c @@ -242,68 +242,73 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re \********************/ /* - * Returns true for the channel numbers used without all_channels modparam. + * Convert IEEE channel number to MHz frequency. */ -static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) +static inline short +ath5k_ieee2mhz(short chan) { - if (band == IEEE80211_BAND_2GHZ && chan <= 14) - return true; + if (chan <= 14 || chan >= 27) + return ieee80211chan2mhz(chan); + else + return 2212 + chan * 20; +} - return /* UNII 1,2 */ - (((chan & 3) == 0 && chan >= 36 && chan <= 64) || +/* + * Returns true for the channel numbers used without all_channels modparam. + */ +static bool ath5k_is_standard_channel(short chan) +{ + return ((chan <= 14) || + /* UNII 1,2 */ + ((chan & 3) == 0 && chan >= 36 && chan <= 64) || /* midband */ ((chan & 3) == 0 && chan >= 100 && chan <= 140) || /* UNII-3 */ - ((chan & 3) == 1 && chan >= 149 && chan <= 165) || - /* 802.11j 5.030-5.080 GHz (20MHz) */ - (chan == 8 || chan == 12 || chan == 16) || - /* 802.11j 4.9GHz (20MHz) */ - (chan == 184 || chan == 188 || chan == 192 || chan == 196)); + ((chan & 3) == 1 && chan >= 149 && chan <= 165)); } static unsigned int -ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, - unsigned int mode, unsigned int max) +ath5k_copy_channels(struct ath5k_hw *ah, + struct ieee80211_channel *channels, + unsigned int mode, + unsigned int max) { - unsigned int count, size, chfreq, freq, ch; - enum ieee80211_band band; + unsigned int i, count, size, chfreq, freq, ch; + + if (!test_bit(mode, ah->ah_modes)) + return 0; switch (mode) { case AR5K_MODE_11A: /* 1..220, but 2GHz frequencies are filtered by check_channel */ - size = 220; + size = 220 ; chfreq = CHANNEL_5GHZ; - band = IEEE80211_BAND_5GHZ; break; case AR5K_MODE_11B: case AR5K_MODE_11G: size = 26; chfreq = CHANNEL_2GHZ; - band = IEEE80211_BAND_2GHZ; break; default: ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); return 0; } - count = 0; - for (ch = 1; ch <= size && count < max; ch++) { - freq = ieee80211_channel_to_frequency(ch, band); - - if (freq == 0) /* mapping failed - not a standard channel */ - continue; + for (i = 0, count = 0; i < size && max > 0; i++) { + ch = i + 1 ; + freq = ath5k_ieee2mhz(ch); /* Check if channel is supported by the chipset */ if (!ath5k_channel_ok(ah, freq, chfreq)) continue; - if (!modparam_all_channels && - !ath5k_is_standard_channel(ch, band)) + if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) continue; 
/* Write channel info and increment counter */ channels[count].center_freq = freq; - channels[count].band = band; + channels[count].band = (chfreq == CHANNEL_2GHZ) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; switch (mode) { case AR5K_MODE_11A: case AR5K_MODE_11G: @@ -314,6 +319,7 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, } count++; + max--; } return count; @@ -358,7 +364,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_bitrates = 12; sband->channels = sc->channels; - sband->n_channels = ath5k_setup_channels(ah, sband->channels, + sband->n_channels = ath5k_copy_channels(ah, sband->channels, AR5K_MODE_11G, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; @@ -384,7 +390,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) } sband->channels = sc->channels; - sband->n_channels = ath5k_setup_channels(ah, sband->channels, + sband->n_channels = ath5k_copy_channels(ah, sband->channels, AR5K_MODE_11B, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; @@ -404,7 +410,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_bitrates = 8; sband->channels = &sc->channels[count_c]; - sband->n_channels = ath5k_setup_channels(ah, sband->channels, + sband->n_channels = ath5k_copy_channels(ah, sband->channels, AR5K_MODE_11A, max_c); hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; @@ -439,6 +445,18 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) return ath5k_reset(sc, chan, true); } +static void +ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) +{ + sc->curmode = mode; + + if (mode == AR5K_MODE_11A) { + sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ]; + } else { + sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ]; + } +} + struct ath_vif_iter_data { const u8 *hw_macaddr; u8 mask[ETH_ALEN]; @@ -551,7 +569,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) "hw_rix out of bounds: %x\n", hw_rix)) return 0; - rix = sc->rate_idx[sc->curchan->band][hw_rix]; + rix = sc->rate_idx[sc->curband->band][hw_rix]; if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) rix = 0; @@ -1361,7 +1379,7 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb, rxs->flag |= RX_FLAG_TSFT; rxs->freq = sc->curchan->center_freq; - rxs->band = sc->curchan->band; + rxs->band = sc->curband->band; rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi; @@ -1376,7 +1394,7 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb, rxs->flag |= ath5k_rx_decrypted(sc, skb, rs); if (rxs->rate_idx >= 0 && rs->rs_rate == - sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short) + sc->curband->bitrates[rxs->rate_idx].hw_value_short) rxs->flag |= RX_FLAG_SHORTPRE; ath5k_debug_dump_skb(sc, skb, "RX ", 0); @@ -2536,6 +2554,7 @@ ath5k_init_hw(struct ath5k_softc *sc) * and then setup of the interrupt mask. 
*/ sc->curchan = sc->hw->conf.channel; + sc->curband = &sc->sbands[sc->curchan->band]; sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; @@ -2662,8 +2681,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan, * so we should also free any remaining * tx buffers */ ath5k_drain_tx_buffs(sc); - if (chan) + if (chan) { sc->curchan = chan; + sc->curband = &sc->sbands[chan->band]; + } ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL, skip_pcu); if (ret) { @@ -2761,6 +2782,12 @@ ath5k_init(struct ieee80211_hw *hw) goto err; } + /* NB: setup here so ath5k_rate_update is happy */ + if (test_bit(AR5K_MODE_11A, ah->ah_modes)) + ath5k_setcurmode(sc, AR5K_MODE_11A); + else + ath5k_setcurmode(sc, AR5K_MODE_11B); + /* * Allocate tx+rx descriptors and populate the lists. */ diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.h b/trunk/drivers/net/wireless/ath/ath5k/base.h index 8f919dca95f1..6d511476e4d2 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/base.h +++ b/trunk/drivers/net/wireless/ath/ath5k/base.h @@ -183,6 +183,8 @@ struct ath5k_softc { enum nl80211_iftype opmode; struct ath5k_hw *ah; /* Atheros HW */ + struct ieee80211_supported_band *curband; + #ifdef CONFIG_ATH5K_DEBUG struct ath5k_dbg_info debug; /* debug info */ #endif /* CONFIG_ATH5K_DEBUG */ @@ -200,6 +202,7 @@ struct ath5k_softc { #define ATH_STAT_STARTED 4 /* opened & irqs enabled */ unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ + unsigned int curmode; /* current phy mode */ struct ieee80211_channel *curchan; /* current h/w channel */ u16 nvifs; diff --git a/trunk/drivers/net/wireless/ath/ath5k/eeprom.c b/trunk/drivers/net/wireless/ath/ath5k/eeprom.c index b6561f785c6e..80e625608bac 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath5k/eeprom.c @@ -72,6 +72,7 @@ static int ath5k_eeprom_init_header(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + int ret; u16 val; u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; @@ -191,7 +192,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset, struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 o = *offset; u16 val; - int i = 0; + int ret, i = 0; AR5K_EEPROM_READ(o++, val); ee->ee_switch_settling[mode] = (val >> 8) & 0x7f; @@ -251,6 +252,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset, struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 o = *offset; u16 val; + int ret; ee->ee_n_piers[mode] = 0; AR5K_EEPROM_READ(o++, val); @@ -513,6 +515,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max, int o = *offset; int i = 0; u8 freq1, freq2; + int ret; u16 val; ee->ee_n_piers[mode] = 0; @@ -548,7 +551,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a; - int i; + int i, ret; u16 val; u8 mask; @@ -967,6 +970,7 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) u32 offset; u8 i, c; u16 val; + int ret; u8 pd_gains = 0; /* Count how many curves we have and @@ -1224,7 +1228,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) struct ath5k_chan_pcal_info *chinfo; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; u32 offset; - int idx, i; + int idx, i, ret; u16 val; u8 pd_gains = 0; @@ -1415,7 +1419,7 @@ 
ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) u8 *rate_target_pwr_num; u32 offset; u16 val; - int i; + int ret, i; offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1); rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode]; @@ -1589,7 +1593,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) struct ath5k_edge_power *rep; unsigned int fmask, pmask; unsigned int ctl_mode; - int i, j; + int ret, i, j; u32 offset; u16 val; @@ -1729,12 +1733,16 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) u8 mac_d[ETH_ALEN] = {}; u32 total, offset; u16 data; - int octet; + int octet, ret; - AR5K_EEPROM_READ(0x20, data); + ret = ath5k_hw_nvram_read(ah, 0x20, &data); + if (ret) + return ret; for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) { - AR5K_EEPROM_READ(offset, data); + ret = ath5k_hw_nvram_read(ah, offset, &data); + if (ret) + return ret; total += data; mac_d[octet + 1] = data & 0xff; diff --git a/trunk/drivers/net/wireless/ath/ath5k/eeprom.h b/trunk/drivers/net/wireless/ath/ath5k/eeprom.h index 6511c27d938e..7c09e150dbdc 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/eeprom.h +++ b/trunk/drivers/net/wireless/ath/ath5k/eeprom.h @@ -241,8 +241,9 @@ enum ath5k_eeprom_freq_bands{ #define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250 #define AR5K_EEPROM_READ(_o, _v) do { \ - if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \ - return -EIO; \ + ret = ath5k_hw_nvram_read(ah, (_o), &(_v)); \ + if (ret) \ + return ret; \ } while (0) #define AR5K_EEPROM_READ_HDR(_o, _v) \ @@ -268,6 +269,29 @@ enum ath5k_ctl_mode { AR5K_CTL_MODE_M = 15, }; +/* Default CTL ids for the 3 main reg domains. + * Atheros only uses these by default but vendors + * can have up to 32 different CTLs for different + * scenarios. Note that theese values are ORed with + * the mode id (above) so we can have up to 24 CTL + * datasets out of these 3 main regdomains. That leaves + * 8 ids that can be used by vendors and since 0x20 is + * missing from HAL sources i guess this is the set of + * custom CTLs vendors can use. 
*/ +#define AR5K_CTL_FCC 0x10 +#define AR5K_CTL_CUSTOM 0x20 +#define AR5K_CTL_ETSI 0x30 +#define AR5K_CTL_MKK 0x40 + +/* Indicates a CTL with only mode set and + * no reg domain mapping, such CTLs are used + * for world roaming domains or simply when + * a reg domain is not set */ +#define AR5K_CTL_NO_REGDOMAIN 0xf0 + +/* Indicates an empty (invalid) CTL */ +#define AR5K_CTL_NO_CTL 0xff + /* Per channel calibration data, used for power table setup */ struct ath5k_chan_pcal_info_rf5111 { /* Power levels in half dbm units diff --git a/trunk/drivers/net/wireless/ath/ath5k/pci.c b/trunk/drivers/net/wireless/ath/ath5k/pci.c index 66598a0d1df0..7f8c5b0e9d2a 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/pci.c +++ b/trunk/drivers/net/wireless/ath/ath5k/pci.c @@ -69,8 +69,7 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz) /* * Read from eeprom */ -static bool -ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) +bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) { struct ath5k_hw *ah = (struct ath5k_hw *) common->ah; u32 status, timeout; @@ -91,15 +90,15 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS); if (status & AR5K_EEPROM_STAT_RDDONE) { if (status & AR5K_EEPROM_STAT_RDERR) - return false; + return -EIO; *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) & 0xffff); - return true; + return 0; } udelay(15); } - return false; + return -ETIMEDOUT; } int ath5k_hw_read_srev(struct ath5k_hw *ah) diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 76388c6d6692..5e300bd3d264 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -805,10 +805,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); - if (AR_SREV_9271(ah)) { - if (!ar9285_hw_cl_cal(ah, chan)) - return false; - } else if (AR_SREV_9285_12_OR_LATER(ah)) { + if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) { if (!ar9285_hw_clc(ah, chan)) return false; } else { diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index a25655640f48..4819747fa4c3 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) { #define POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) /* make sure forced gain is not set */ - REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0); + REG_WRITE(ah, 0xa458, 0); /* Write the OFDM power per rate set */ /* 6 (LSB), 9, 12, 18 (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0), + REG_WRITE(ah, 0xa3c0, POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0)); /* 24 (LSB), 36, 48, 54 (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1), + REG_WRITE(ah, 0xa3c4, POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) | @@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) /* Write the CCK power per rate set */ /* 1L (LSB), reserved, 2L, 2S (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2), + REG_WRITE(ah, 
0xa3c8, POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) | /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)); /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3), + REG_WRITE(ah, 0xa3cc, POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) | @@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) /* Write the HT20 power per rate set */ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4), + REG_WRITE(ah, 0xa3d0, POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) | POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) | @@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 6 (LSB), 7, 12, 13 (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5), + REG_WRITE(ah, 0xa3d4, POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) | POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) | @@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 14 (LSB), 15, 20, 21 */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9), + REG_WRITE(ah, 0xa3e4, POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) | POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) | @@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) /* Mixed HT20 and HT40 rates */ /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10), + REG_WRITE(ah, 0xa3e8, POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) | @@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) * correct PAR difference between HT40 and HT20/LEGACY * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6), + REG_WRITE(ah, 0xa3d8, POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) | POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) | @@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 6 (LSB), 7, 12, 13 (MSB) */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7), + REG_WRITE(ah, 0xa3dc, POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) | POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) | @@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 14 (LSB), 15, 20, 21 */ - REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11), + REG_WRITE(ah, 0xa3ec, POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) | POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) | diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 038a0cbfc6e7..4ceddbbdfcee 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, */ if (rxsp->status11 & AR_CRCErr) rxs->rs_status |= ATH9K_RXERR_CRC; - else if (rxsp->status11 & AR_PHYErr) { + if (rxsp->status11 & AR_PHYErr) { phyerr = MS(rxsp->status11, AR_PHYErrCode); /* * If we reach a point here where AR_PostDelimCRCErr is @@ -638,11 
+638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_phyerr = phyerr; } - } else if (rxsp->status11 & AR_DecryptCRCErr) + } + if (rxsp->status11 & AR_DecryptCRCErr) rxs->rs_status |= ATH9K_RXERR_DECRYPT; - else if (rxsp->status11 & AR_MichaelErr) + if (rxsp->status11 & AR_MichaelErr) rxs->rs_status |= ATH9K_RXERR_MIC; - if (rxsp->status11 & AR_KeyMiss) rxs->rs_status |= ATH9K_RXERR_DECRYPT; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 8bdda2cf9dd7..59bab6bd8a74 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -486,8 +486,6 @@ #define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) #define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0) -#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2)) - #define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0) #define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4) diff --git a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h index 6636f3c6dcf9..3681caf54282 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h @@ -95,9 +95,9 @@ struct ath_config { * @BUF_XRETRY: To denote excessive retries of the buffer */ enum buffer_type { - BUF_AMPDU = BIT(0), - BUF_AGGR = BIT(1), - BUF_XRETRY = BIT(2), + BUF_AMPDU = BIT(2), + BUF_AGGR = BIT(3), + BUF_XRETRY = BIT(5), }; #define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) @@ -137,6 +137,7 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ WME_AC_VO) +#define ADDBA_EXCHANGE_ATTEMPTS 10 #define ATH_AGGR_DELIM_SZ 4 #define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ /* number of delimiters for encryption padding */ @@ -183,8 +184,7 @@ enum ATH_AGGR_STATUS { #define ATH_TXFIFO_DEPTH 8 struct ath_txq { - int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */ - u32 axq_qnum; /* ath9k hardware queue number */ + u32 axq_qnum; u32 *axq_link; struct list_head axq_q; spinlock_t axq_lock; @@ -254,10 +254,7 @@ struct ath_atx_tid { }; struct ath_node { -#ifdef CONFIG_ATH9K_DEBUGFS - struct list_head list; /* for sc->nodes */ - struct ieee80211_sta *sta; /* station struct we're part of */ -#endif + struct ath_common *common; struct ath_atx_tid tid[WME_NUM_TID]; struct ath_atx_ac ac[WME_NUM_AC]; u16 maxampdu; @@ -280,11 +277,6 @@ struct ath_tx_control { #define ATH_TX_XRETRY 0x02 #define ATH_TX_BAR 0x04 -/** - * @txq_map: Index is mac80211 queue number. This is - * not necessarily the same as the hardware queue number - * (axq_qnum). - */ struct ath_tx { u16 seq_no; u32 txqsetup; @@ -350,6 +342,7 @@ struct ath_vif { __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ enum nl80211_iftype av_opmode; struct ath_buf *av_bcbuf; + struct ath_tx_control av_btxctl; u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */ }; @@ -567,20 +560,6 @@ struct ath_ant_comb { struct ath_wiphy; struct ath_rate_table; -struct ath9k_vif_iter_data { - const u8 *hw_macaddr; /* phy's hardware address, set - * before starting iteration for - * valid bssid mask. - */ - u8 mask[ETH_ALEN]; /* bssid mask */ - int naps; /* number of AP vifs */ - int nmeshes; /* number of mesh vifs */ - int nstations; /* number of station vifs */ - int nwds; /* number of nwd vifs */ - int nadhocs; /* number of adhoc vifs */ - int nothers; /* number of vifs not specified above. 
*/ -}; - struct ath_softc { struct ieee80211_hw *hw; struct device *dev; @@ -620,10 +599,10 @@ struct ath_softc { u32 sc_flags; /* SC_OP_* */ u16 ps_flags; /* PS_* */ u16 curtxpow; + u8 nbcnvifs; + u16 nvifs; bool ps_enabled; bool ps_idle; - short nbcnvifs; - short nvifs; unsigned long ps_usecount; struct ath_config config; @@ -646,9 +625,6 @@ struct ath_softc { #ifdef CONFIG_ATH9K_DEBUGFS struct ath9k_debug debug; - spinlock_t nodes_lock; - struct list_head nodes; /* basically, stations */ - unsigned int tx_complete_poll_work_seen; #endif struct ath_beacon_config cur_beacon_conf; struct delayed_work tx_complete_work; @@ -707,7 +683,6 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode); -bool ath9k_uses_beacons(int type); #ifdef CONFIG_PCI int ath_pci_init(void); @@ -752,9 +727,5 @@ bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); void ath_start_rfkill_poll(struct ath_softc *sc); extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); -void ath9k_calculate_iter_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ath9k_vif_iter_data *iter_data); - #endif /* ATH9K_H */ diff --git a/trunk/drivers/net/wireless/ath/ath9k/beacon.c b/trunk/drivers/net/wireless/ath/ath9k/beacon.c index ab8c05cf62f3..385ba03134ba 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/beacon.c +++ b/trunk/drivers/net/wireless/ath/ath9k/beacon.c @@ -244,7 +244,9 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) struct ath_buf, list); list_del(&avp->av_bcbuf->list); - if (ath9k_uses_beacons(vif->type)) { + if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || + sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC || + sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) { int slot; /* * Assign the vif to a beacon xmit slot. As @@ -279,8 +281,10 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) /* NB: the beacon data buffer must be 32-bit aligned. 
*/ skb = ieee80211_beacon_get(sc->hw, vif); - if (skb == NULL) + if (skb == NULL) { + ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n"); return -ENOMEM; + } tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; sc->beacon.bc_tstamp = le64_to_cpu(tstamp); @@ -716,10 +720,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) iftype = sc->sc_ah->opmode; } - cur_conf->listen_interval = 1; - cur_conf->dtim_count = 1; - cur_conf->bmiss_timeout = - ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; + cur_conf->listen_interval = 1; + cur_conf->dtim_count = 1; + cur_conf->bmiss_timeout = + ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; /* * It looks like mac80211 may end up using beacon interval of zero in diff --git a/trunk/drivers/net/wireless/ath/ath9k/calib.c b/trunk/drivers/net/wireless/ath/ath9k/calib.c index b4a92a4313f6..b68a1acbddd0 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/calib.c @@ -382,8 +382,9 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, s16 default_nf; int i, j; - ah->caldata->channel = chan->channel; - ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT; + if (!ah->caldata) + return; + h = ah->caldata->nfCalHist; default_nf = ath9k_hw_get_default_nf(ah, chan); for (i = 0; i < NUM_NF_READINGS; i++) { diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.c b/trunk/drivers/net/wireless/ath/ath9k/debug.c index f0c80ec290d1..3586c43077a7 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.c +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.c @@ -587,60 +587,26 @@ static const struct file_operations fops_wiphy = { sc->debug.stats.txstats[WME_AC_BK].elem, \ sc->debug.stats.txstats[WME_AC_VI].elem, \ sc->debug.stats.txstats[WME_AC_VO].elem); \ - if (len >= size) \ - goto done; \ } while(0) -#define PRX(str, elem) \ -do { \ - len += snprintf(buf + len, size - len, \ - "%s%13u%11u%10u%10u\n", str, \ - (unsigned int)(sc->tx.txq[ATH_TXQ_AC_BE].elem), \ - (unsigned int)(sc->tx.txq[ATH_TXQ_AC_BK].elem), \ - (unsigned int)(sc->tx.txq[ATH_TXQ_AC_VI].elem), \ - (unsigned int)(sc->tx.txq[ATH_TXQ_AC_VO].elem)); \ - if (len >= size) \ - goto done; \ -} while(0) - -#define PRQLE(str, elem) \ -do { \ - len += snprintf(buf + len, size - len, \ - "%s%13i%11i%10i%10i\n", str, \ - list_empty(&sc->tx.txq[ATH_TXQ_AC_BE].elem), \ - list_empty(&sc->tx.txq[ATH_TXQ_AC_BK].elem), \ - list_empty(&sc->tx.txq[ATH_TXQ_AC_VI].elem), \ - list_empty(&sc->tx.txq[ATH_TXQ_AC_VO].elem)); \ - if (len >= size) \ - goto done; \ -} while (0) - static ssize_t read_file_xmit(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; char *buf; - unsigned int len = 0, size = 8000; - int i; + unsigned int len = 0, size = 2048; ssize_t retval = 0; - char tmp[32]; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; - len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x" - " poll-work-seen: %u\n" - "%30s %10s%10s%10s\n\n", - ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup, - sc->tx_complete_poll_work_seen, - "BE", "BK", "VI", "VO"); + len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); PR("MPDUs Queued: ", queued); PR("MPDUs Completed: ", completed); PR("Aggregates: ", a_aggr); - PR("AMPDUs Queued HW:", a_queued_hw); - PR("AMPDUs Queued SW:", a_queued_sw); + PR("AMPDUs Queued: ", a_queued); PR("AMPDUs Completed:", a_completed); PR("AMPDUs Retried: ", a_retries); PR("AMPDUs XRetried: ", a_xretries); @@ -652,223 +618,6 @@ 
static ssize_t read_file_xmit(struct file *file, char __user *user_buf, PR("DELIM Underrun: ", delim_underrun); PR("TX-Pkts-All: ", tx_pkts_all); PR("TX-Bytes-All: ", tx_bytes_all); - PR("hw-put-tx-buf: ", puttxbuf); - PR("hw-tx-start: ", txstart); - PR("hw-tx-proc-desc: ", txprocdesc); - len += snprintf(buf + len, size - len, - "%s%11p%11p%10p%10p\n", "txq-memory-address:", - &(sc->tx.txq[ATH_TXQ_AC_BE]), - &(sc->tx.txq[ATH_TXQ_AC_BK]), - &(sc->tx.txq[ATH_TXQ_AC_VI]), - &(sc->tx.txq[ATH_TXQ_AC_VO])); - if (len >= size) - goto done; - - PRX("axq-qnum: ", axq_qnum); - PRX("axq-depth: ", axq_depth); - PRX("axq-ampdu_depth: ", axq_ampdu_depth); - PRX("axq-stopped ", stopped); - PRX("tx-in-progress ", axq_tx_inprogress); - PRX("pending-frames ", pending_frames); - PRX("txq_headidx: ", txq_headidx); - PRX("txq_tailidx: ", txq_headidx); - - PRQLE("axq_q empty: ", axq_q); - PRQLE("axq_acq empty: ", axq_acq); - PRQLE("txq_fifo_pending: ", txq_fifo_pending); - for (i = 0; i < ATH_TXFIFO_DEPTH; i++) { - snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i); - PRQLE(tmp, txq_fifo[i]); - } - - /* Print out more detailed queue-info */ - for (i = 0; i <= WME_AC_BK; i++) { - struct ath_txq *txq = &(sc->tx.txq[i]); - struct ath_atx_ac *ac; - struct ath_atx_tid *tid; - if (len >= size) - goto done; - spin_lock_bh(&txq->axq_lock); - if (!list_empty(&txq->axq_acq)) { - ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, - list); - len += snprintf(buf + len, size - len, - "txq[%i] first-ac: %p sched: %i\n", - i, ac, ac->sched); - if (list_empty(&ac->tid_q) || (len >= size)) - goto done_for; - tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, - list); - len += snprintf(buf + len, size - len, - " first-tid: %p sched: %i paused: %i\n", - tid, tid->sched, tid->paused); - } - done_for: - spin_unlock_bh(&txq->axq_lock); - } - -done: - if (len > size) - len = size; - - retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); - kfree(buf); - - return retval; -} - -static ssize_t read_file_stations(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - char *buf; - unsigned int len = 0, size = 64000; - struct ath_node *an = NULL; - ssize_t retval = 0; - int q; - - buf = kzalloc(size, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - len += snprintf(buf + len, size - len, - "Stations:\n" - " tid: addr sched paused buf_q-empty an ac\n" - " ac: addr sched tid_q-empty txq\n"); - - spin_lock(&sc->nodes_lock); - list_for_each_entry(an, &sc->nodes, list) { - len += snprintf(buf + len, size - len, - "%pM\n", an->sta->addr); - if (len >= size) - goto done; - - for (q = 0; q < WME_NUM_TID; q++) { - struct ath_atx_tid *tid = &(an->tid[q]); - len += snprintf(buf + len, size - len, - " tid: %p %s %s %i %p %p\n", - tid, tid->sched ? "sched" : "idle", - tid->paused ? "paused" : "running", - list_empty(&tid->buf_q), - tid->an, tid->ac); - if (len >= size) - goto done; - } - - for (q = 0; q < WME_NUM_AC; q++) { - struct ath_atx_ac *ac = &(an->ac[q]); - len += snprintf(buf + len, size - len, - " ac: %p %s %i %p\n", - ac, ac->sched ? 
"sched" : "idle", - list_empty(&ac->tid_q), ac->txq); - if (len >= size) - goto done; - } - } - -done: - spin_unlock(&sc->nodes_lock); - if (len > size) - len = size; - - retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); - kfree(buf); - - return retval; -} - -static ssize_t read_file_misc(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_hw *ah = sc->sc_ah; - struct ieee80211_hw *hw = sc->hw; - char *buf; - unsigned int len = 0, size = 8000; - ssize_t retval = 0; - const char *tmp; - unsigned int reg; - struct ath9k_vif_iter_data iter_data; - - ath9k_calculate_iter_data(hw, NULL, &iter_data); - - buf = kzalloc(size, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - switch (sc->sc_ah->opmode) { - case NL80211_IFTYPE_ADHOC: - tmp = "ADHOC"; - break; - case NL80211_IFTYPE_MESH_POINT: - tmp = "MESH"; - break; - case NL80211_IFTYPE_AP: - tmp = "AP"; - break; - case NL80211_IFTYPE_STATION: - tmp = "STATION"; - break; - default: - tmp = "???"; - break; - } - - len += snprintf(buf + len, size - len, - "curbssid: %pM\n" - "OP-Mode: %s(%i)\n" - "Beacon-Timer-Register: 0x%x\n", - common->curbssid, - tmp, (int)(sc->sc_ah->opmode), - REG_READ(ah, AR_BEACON_PERIOD)); - - reg = REG_READ(ah, AR_TIMER_MODE); - len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (", - reg); - if (reg & AR_TBTT_TIMER_EN) - len += snprintf(buf + len, size - len, "TBTT "); - if (reg & AR_DBA_TIMER_EN) - len += snprintf(buf + len, size - len, "DBA "); - if (reg & AR_SWBA_TIMER_EN) - len += snprintf(buf + len, size - len, "SWBA "); - if (reg & AR_HCF_TIMER_EN) - len += snprintf(buf + len, size - len, "HCF "); - if (reg & AR_TIM_TIMER_EN) - len += snprintf(buf + len, size - len, "TIM "); - if (reg & AR_DTIM_TIMER_EN) - len += snprintf(buf + len, size - len, "DTIM "); - len += snprintf(buf + len, size - len, ")\n"); - - reg = sc->sc_ah->imask; - len += snprintf(buf + len, size - len, "imask: 0x%x (", reg); - if (reg & ATH9K_INT_SWBA) - len += snprintf(buf + len, size - len, "SWBA "); - if (reg & ATH9K_INT_BMISS) - len += snprintf(buf + len, size - len, "BMISS "); - if (reg & ATH9K_INT_CST) - len += snprintf(buf + len, size - len, "CST "); - if (reg & ATH9K_INT_RX) - len += snprintf(buf + len, size - len, "RX "); - if (reg & ATH9K_INT_RXHP) - len += snprintf(buf + len, size - len, "RXHP "); - if (reg & ATH9K_INT_RXLP) - len += snprintf(buf + len, size - len, "RXLP "); - if (reg & ATH9K_INT_BB_WATCHDOG) - len += snprintf(buf + len, size - len, "BB_WATCHDOG "); - /* there are other IRQs if one wanted to add them. 
*/ - len += snprintf(buf + len, size - len, ")\n"); - - len += snprintf(buf + len, size - len, - "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i" - " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n", - iter_data.naps, iter_data.nstations, iter_data.nmeshes, - iter_data.nwds, iter_data.nadhocs, iter_data.nothers, - sc->nvifs, sc->nbcnvifs); - - len += snprintf(buf + len, size - len, - "Calculated-BSSID-Mask: %pM\n", - iter_data.mask); if (len > size) len = size; @@ -917,20 +666,6 @@ static const struct file_operations fops_xmit = { .llseek = default_llseek, }; -static const struct file_operations fops_stations = { - .read = read_file_stations, - .open = ath9k_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -static const struct file_operations fops_misc = { - .read = read_file_misc, - .open = ath9k_debugfs_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - static ssize_t read_file_recv(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1168,14 +903,6 @@ int ath9k_init_debug(struct ath_hw *ah) sc, &fops_xmit)) goto err; - if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, - sc, &fops_stations)) - goto err; - - if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, - sc, &fops_misc)) - goto err; - if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_recv)) goto err; diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.h b/trunk/drivers/net/wireless/ath/ath9k/debug.h index 980c9fa194b9..1e5078bd0344 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.h +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.h @@ -89,8 +89,7 @@ struct ath_interrupt_stats { * @queued: Total MPDUs (non-aggr) queued * @completed: Total MPDUs (non-aggr) completed * @a_aggr: Total no. of aggregates queued - * @a_queued_hw: Total AMPDUs queued to hardware - * @a_queued_sw: Total AMPDUs queued to software queues + * @a_queued: Total AMPDUs queued * @a_completed: Total AMPDUs completed * @a_retries: No. of AMPDUs retried (SW) * @a_xretries: No. of AMPDUs dropped due to xretries @@ -103,9 +102,6 @@ struct ath_interrupt_stats { * @desc_cfg_err: Descriptor configuration errors * @data_urn: TX data underrun errors * @delim_urn: TX delimiter underrun errors - * @puttxbuf: Number of times hardware was given txbuf to write. - * @txstart: Number of times hardware was told to start tx. 
- * @txprocdesc: Number of times tx descriptor was processed */ struct ath_tx_stats { u32 tx_pkts_all; @@ -113,8 +109,7 @@ struct ath_tx_stats { u32 queued; u32 completed; u32 a_aggr; - u32 a_queued_hw; - u32 a_queued_sw; + u32 a_queued; u32 a_completed; u32 a_retries; u32 a_xretries; @@ -124,9 +119,6 @@ struct ath_tx_stats { u32 desc_cfg_err; u32 data_underrun; u32 delim_underrun; - u32 puttxbuf; - u32 txstart; - u32 txprocdesc; }; /** diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom.c index 8c18bed3a558..d05163159572 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom.c @@ -89,38 +89,6 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, return false; } -void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, - int eep_start_loc, int size) -{ - int i = 0, j, addr; - u32 addrdata[8]; - u32 data[8]; - - for (addr = 0; addr < size; addr++) { - addrdata[i] = AR5416_EEPROM_OFFSET + - ((addr + eep_start_loc) << AR5416_EEPROM_S); - i++; - if (i == 8) { - REG_READ_MULTI(ah, addrdata, data, i); - - for (j = 0; j < i; j++) { - *eep_data = data[j]; - eep_data++; - } - i = 0; - } - } - - if (i != 0) { - REG_READ_MULTI(ah, addrdata, data, i); - - for (j = 0; j < i; j++) { - *eep_data = data[j]; - eep_data++; - } - } -} - bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) { return common->bus_ops->eeprom_read(common, off, data); diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom.h b/trunk/drivers/net/wireless/ath/ath9k/eeprom.h index bd82447f5b78..58e2ddc927a9 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom.h @@ -665,8 +665,6 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, u16 *indexL, u16 *indexR); bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data); -void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, - int eep_start_loc, int size); void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, u8 *pVpdList, u16 numIntercepts, u8 *pRetVpdList); diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c index bc77a308c901..fbdff7e47952 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -27,13 +27,19 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); } -#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) - -static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) +static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { +#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.map4k; - int addr, eep_start_loc = 64; + int addr, eep_start_loc = 0; + + eep_start_loc = 64; + + if (!ath9k_hw_use_flash(ah)) { + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); + } for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { @@ -45,33 +51,8 @@ static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) } return true; -} - -static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah) -{ - u16 *eep_data = (u16 *)&ah->eeprom.map4k; - - ath9k_hw_usb_gen_fill_eeprom(ah, 
eep_data, 64, SIZE_EEPROM_4K); - - return true; -} - -static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); - } - - if (common->bus_ops->ath_bus_type == ATH_USB) - return __ath9k_hw_usb_4k_fill_eeprom(ah); - else - return __ath9k_hw_4k_fill_eeprom(ah); -} - #undef SIZE_EEPROM_4K +} static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) { diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 8cd8333cc086..9b6bc8a953bc 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -17,7 +17,7 @@ #include "hw.h" #include "ar9002_phy.h" -#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16)) +#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16)) static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) { @@ -29,15 +29,25 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah) return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; } -static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) +static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data; - int addr, eep_start_loc = AR9287_EEP_START_LOC; + int addr, eep_start_loc; eep_data = (u16 *)eep; - for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { + if (common->bus_ops->ath_bus_type == ATH_USB) + eep_start_loc = AR9287_HTC_EEP_START_LOC; + else + eep_start_loc = AR9287_EEP_START_LOC; + + if (!ath9k_hw_use_flash(ah)) { + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); + } + + for (addr = 0; addr < NUM_EEP_WORDS; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { ath_dbg(common, ATH_DBG_EEPROM, @@ -50,31 +60,6 @@ static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) return true; } -static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah) -{ - u16 *eep_data = (u16 *)&ah->eeprom.map9287; - - ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, - AR9287_HTC_EEP_START_LOC, - SIZE_EEPROM_AR9287); - return true; -} - -static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); - } - - if (common->bus_ops->ath_bus_type == ATH_USB) - return __ath9k_hw_usb_ar9287_fill_eeprom(ah); - else - return __ath9k_hw_ar9287_fill_eeprom(ah); -} - static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) { u32 sum = 0, el, integer; @@ -101,7 +86,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) need_swap = true; eepdata = (u16 *)(&ah->eeprom); - for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { + for (addr = 0; addr < NUM_EEP_WORDS; addr++) { temp = swab16(*eepdata); *eepdata = temp; eepdata++; diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c index c9318ff40964..749a93608664 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -86,10 +86,9 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah) return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF); } -#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) - -static bool 
__ath9k_hw_def_fill_eeprom(struct ath_hw *ah) +static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) { +#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.def; int addr, ar5416_eep_start_loc = 0x100; @@ -104,33 +103,8 @@ static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah) eep_data++; } return true; -} - -static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah) -{ - u16 *eep_data = (u16 *)&ah->eeprom.def; - - ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, - 0x100, SIZE_EEPROM_DEF); - return true; -} - -static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); - } - - if (common->bus_ops->ath_bus_type == ATH_USB) - return __ath9k_hw_usb_def_fill_eeprom(ah); - else - return __ath9k_hw_def_fill_eeprom(ah); -} - #undef SIZE_EEPROM_DEF +} static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) { diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc.h b/trunk/drivers/net/wireless/ath/ath9k/htc.h index 63549868e686..780ac5eac501 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc.h +++ b/trunk/drivers/net/wireless/ath/ath9k/htc.h @@ -366,7 +366,7 @@ struct ath9k_htc_priv { u16 seq_no; u32 bmiss_cnt; - struct ath9k_hw_cal_data caldata; + struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS]; spinlock_t beacon_lock; diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 8e04586c5256..38433f9bfe59 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -297,34 +297,6 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) return be32_to_cpu(val); } -static void ath9k_multi_regread(void *hw_priv, u32 *addr, - u32 *val, u16 count) -{ - struct ath_hw *ah = (struct ath_hw *) hw_priv; - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; - __be32 tmpaddr[8]; - __be32 tmpval[8]; - int i, ret; - - for (i = 0; i < count; i++) { - tmpaddr[i] = cpu_to_be32(addr[i]); - } - - ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID, - (u8 *)tmpaddr , sizeof(u32) * count, - (u8 *)tmpval, sizeof(u32) * count, - 100); - if (unlikely(ret)) { - ath_dbg(common, ATH_DBG_WMI, - "Multiple REGISTER READ FAILED (count: %d)\n", count); - } - - for (i = 0; i < count; i++) { - val[i] = be32_to_cpu(tmpval[i]); - } -} - static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) { struct ath_hw *ah = (struct ath_hw *) hw_priv; @@ -435,7 +407,6 @@ static void ath9k_regwrite_flush(void *hw_priv) static const struct ath_ops ath9k_common_ops = { .read = ath9k_regread, - .multi_read = ath9k_multi_regread, .write = ath9k_regwrite, .enable_write_buffer = ath9k_enable_regwrite_buffer, .write_flush = ath9k_regwrite_flush, diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c index f14f37d29f45..f4d576bc3ccd 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -121,7 +121,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv) struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_channel *channel = priv->hw->conf.channel; - struct ath9k_hw_cal_data *caldata = NULL; + 
struct ath9k_hw_cal_data *caldata; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; @@ -139,7 +139,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv) WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); - caldata = &priv->caldata; + caldata = &priv->caldata[channel->hw_value]; ret = ath9k_hw_reset(ah, ah->curchan, caldata, false); if (ret) { ath_err(common, @@ -202,8 +202,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), fastcc); - if (!fastcc) - caldata = &priv->caldata; + caldata = &priv->caldata[channel->hw_value]; ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); if (ret) { ath_err(common, @@ -1549,7 +1548,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, - u16 tid, u16 *ssn, u8 buf_size) + u16 tid, u16 *ssn) { struct ath9k_htc_priv *priv = hw->priv; struct ath9k_htc_sta *ista; diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index 4615fd9c9aa0..1afb8bb85756 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -369,9 +369,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah) else ah->config.ht_enable = 0; - /* PAPRD needs some more work to be enabled */ - ah->config.paprd_disable = 1; - ah->config.rx_intr_mitigation = true; ah->config.pcieSerDesWrite = true; @@ -495,17 +492,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->hw_version.devid == AR5416_AR9100_DEVID) ah->hw_version.macVersion = AR_SREV_VERSION_9100; - ath9k_hw_read_revisions(ah); - - /* - * Read back AR_WA into a permanent copy and set bits 14 and 17. - * We need to do this to avoid RMW of this register. We cannot - * read the reg when chip is asleep. - */ - ah->WARegVal = REG_READ(ah, AR_WA); - ah->WARegVal |= (AR_WA_D3_L1_DISABLE | - AR_WA_ASPM_TIMER_BASED_DISABLE); - if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { ath_err(common, "Couldn't reset chip\n"); return -EIO; @@ -574,6 +560,14 @@ static int __ath9k_hw_init(struct ath_hw *ah) ath9k_hw_init_mode_regs(ah); + /* + * Read back AR_WA into a permanent copy and set bits 14 and 17. + * We need to do this to avoid RMW of this register. We cannot + * read the reg when chip is asleep. 
+ */ + ah->WARegVal = REG_READ(ah, AR_WA); + ah->WARegVal |= (AR_WA_D3_L1_DISABLE | + AR_WA_ASPM_TIMER_BASED_DISABLE); if (ah->is_pciexpress) ath9k_hw_configpcipowersave(ah, 0, 0); @@ -1085,6 +1079,8 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) return false; } + ath9k_hw_read_revisions(ah); + return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); } @@ -1937,8 +1933,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->rx_status_len = sizeof(struct ar9003_rxs); pCap->tx_desc_len = sizeof(struct ar9003_txc); pCap->txs_len = sizeof(struct ar9003_txs); - if (!ah->config.paprd_disable && - ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) + if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; } else { pCap->tx_desc_len = sizeof(struct ath_desc); diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.h b/trunk/drivers/net/wireless/ath/ath9k/hw.h index ad8c2c702130..5a3dfec45e96 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.h +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.h @@ -70,9 +70,6 @@ #define REG_READ(_ah, _reg) \ ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) -#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \ - ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt)) - #define ENABLE_REGWRITE_BUFFER(_ah) \ do { \ if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \ @@ -228,7 +225,6 @@ struct ath9k_ops_config { u32 pcie_waen; u8 analog_shiftreg; u8 ht_enable; - u8 paprd_disable; u32 ofdm_trig_low; u32 ofdm_trig_high; u32 cck_trig_high; diff --git a/trunk/drivers/net/wireless/ath/ath9k/init.c b/trunk/drivers/net/wireless/ath/ath9k/init.c index 5279653c90c7..767d8b86f1e1 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/init.c +++ b/trunk/drivers/net/wireless/ath/ath9k/init.c @@ -442,10 +442,9 @@ static int ath9k_init_queues(struct ath_softc *sc) sc->config.cabqReadytime = ATH_CABQ_READY_TIME; ath_cabq_update(sc); - for (i = 0; i < WME_NUM_AC; i++) { + for (i = 0; i < WME_NUM_AC; i++) sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); - sc->tx.txq_map[i]->mac80211_qnum = i; - } + return 0; } @@ -538,7 +537,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, if (!ah) return -ENOMEM; - ah->hw = sc->hw; ah->hw_version.devid = devid; ah->hw_version.subsysid = subsysid; sc->sc_ah = ah; @@ -560,10 +558,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, spin_lock_init(&sc->sc_serial_rw); spin_lock_init(&sc->sc_pm_lock); mutex_init(&sc->mutex); -#ifdef CONFIG_ATH9K_DEBUGFS - spin_lock_init(&sc->nodes_lock); - INIT_LIST_HEAD(&sc->nodes); -#endif tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, (unsigned long)sc); diff --git a/trunk/drivers/net/wireless/ath/ath9k/mac.c b/trunk/drivers/net/wireless/ath/ath9k/mac.c index c75d40fb86f1..180170d3ce25 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/mac.c @@ -690,23 +690,17 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { - /* - * Treat these errors as mutually exclusive to avoid spurious - * extra error reports from the hardware. 
If a CRC error is - * reported, then decryption and MIC errors are irrelevant, - * the frame is going to be dropped either way - */ if (ads.ds_rxstatus8 & AR_CRCErr) rs->rs_status |= ATH9K_RXERR_CRC; - else if (ads.ds_rxstatus8 & AR_PHYErr) { + if (ads.ds_rxstatus8 & AR_PHYErr) { rs->rs_status |= ATH9K_RXERR_PHY; phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); rs->rs_phyerr = phyerr; - } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) + } + if (ads.ds_rxstatus8 & AR_DecryptCRCErr) rs->rs_status |= ATH9K_RXERR_DECRYPT; - else if (ads.ds_rxstatus8 & AR_MichaelErr) + if (ads.ds_rxstatus8 & AR_MichaelErr) rs->rs_status |= ATH9K_RXERR_MIC; - if (ads.ds_rxstatus8 & AR_KeyMiss) rs->rs_status |= ATH9K_RXERR_DECRYPT; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index facff102dd0e..f90a6ca94a76 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -251,9 +251,6 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, if (!ath_stoprecv(sc)) stopped = false; - if (!ath9k_hw_check_alive(ah)) - stopped = false; - /* XXX: do not flush receive queue here. We don't want * to flush data frames already in queue because of * changing channel. */ @@ -548,12 +545,6 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) struct ath_hw *ah = sc->sc_ah; an = (struct ath_node *)sta->drv_priv; -#ifdef CONFIG_ATH9K_DEBUGFS - spin_lock(&sc->nodes_lock); - list_add(&an->list, &sc->nodes); - spin_unlock(&sc->nodes_lock); - an->sta = sta; -#endif if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM) sc->sc_flags |= SC_OP_ENABLE_APM; @@ -569,13 +560,6 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) { struct ath_node *an = (struct ath_node *)sta->drv_priv; -#ifdef CONFIG_ATH9K_DEBUGFS - spin_lock(&sc->nodes_lock); - list_del(&an->list); - spin_unlock(&sc->nodes_lock); - an->sta = NULL; -#endif - if (sc->sc_flags & SC_OP_TXAGGR) ath_tx_node_cleanup(sc, an); } @@ -608,23 +592,17 @@ void ath9k_tasklet(unsigned long data) u32 status = sc->intrstatus; u32 rxmask; + ath9k_ps_wakeup(sc); + if (status & ATH9K_INT_FATAL) { ath_reset(sc, true); + ath9k_ps_restore(sc); return; } - ath9k_ps_wakeup(sc); spin_lock(&sc->sc_pcu_lock); - /* - * Only run the baseband hang check if beacons stop working in AP or - * IBSS mode, because it has a high false positive rate. 
For station - * mode it should not be necessary, since the upper layers will detect - * this through a beacon miss automatically and the following channel - * change will trigger a hardware reset anyway - */ - if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 && - !ath9k_hw_check_alive(ah)) + if (!ath9k_hw_check_alive(ah)) ieee80211_queue_work(sc->hw, &sc->hw_check_work); if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) @@ -991,7 +969,6 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) /* Stop ANI */ del_timer_sync(&common->ani.timer); - ath9k_ps_wakeup(sc); spin_lock_bh(&sc->sc_pcu_lock); ieee80211_stop_queues(hw); @@ -1038,7 +1015,6 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) /* Start ANI */ ath_start_ani(common); - ath9k_ps_restore(sc); return r; } @@ -1365,251 +1341,112 @@ static void ath9k_stop(struct ieee80211_hw *hw) ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); } -bool ath9k_uses_beacons(int type) -{ - switch (type) { - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_MESH_POINT: - return true; - default: - return false; - } -} - -static void ath9k_reclaim_beacon(struct ath_softc *sc, - struct ieee80211_vif *vif) +static int ath9k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; + enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; + int ret = 0; - /* Disable SWBA interrupt */ - sc->sc_ah->imask &= ~ATH9K_INT_SWBA; - ath9k_ps_wakeup(sc); - ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); - tasklet_kill(&sc->bcon_tasklet); - ath9k_ps_restore(sc); - - ath_beacon_return(sc, avp); - sc->sc_flags &= ~SC_OP_BEACONS; - - if (sc->nbcnvifs > 0) { - /* Re-enable beaconing */ - sc->sc_ah->imask |= ATH9K_INT_SWBA; - ath9k_ps_wakeup(sc); - ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); - ath9k_ps_restore(sc); - } -} - -static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ - struct ath9k_vif_iter_data *iter_data = data; - int i; - - if (iter_data->hw_macaddr) - for (i = 0; i < ETH_ALEN; i++) - iter_data->mask[i] &= - ~(iter_data->hw_macaddr[i] ^ mac[i]); + mutex_lock(&sc->mutex); switch (vif->type) { - case NL80211_IFTYPE_AP: - iter_data->naps++; - break; case NL80211_IFTYPE_STATION: - iter_data->nstations++; + ic_opmode = NL80211_IFTYPE_STATION; break; - case NL80211_IFTYPE_ADHOC: - iter_data->nadhocs++; + case NL80211_IFTYPE_WDS: + ic_opmode = NL80211_IFTYPE_WDS; break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: - iter_data->nmeshes++; - break; - case NL80211_IFTYPE_WDS: - iter_data->nwds++; + if (sc->nbcnvifs >= ATH_BCBUF) { + ret = -ENOBUFS; + goto out; + } + ic_opmode = vif->type; break; default: - iter_data->nothers++; - break; + ath_err(common, "Interface type %d not yet supported\n", + vif->type); + ret = -EOPNOTSUPP; + goto out; } -} - -/* Called with sc->mutex held. */ -void ath9k_calculate_iter_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ath9k_vif_iter_data *iter_data) -{ - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - int i; - /* - * Use the hardware MAC address as reference, the hardware uses it - * together with the BSSID mask when matching addresses. 
- */ - memset(iter_data, 0, sizeof(*iter_data)); - iter_data->hw_macaddr = common->macaddr; - memset(&iter_data->mask, 0xff, ETH_ALEN); - - if (vif) - ath9k_vif_iter(iter_data, vif->addr, vif); + ath_dbg(common, ATH_DBG_CONFIG, + "Attach a VIF of type: %d\n", ic_opmode); - /* Get list of all active MAC addresses */ - spin_lock_bh(&sc->wiphy_lock); - ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter, - iter_data); - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] == NULL) - continue; - ieee80211_iterate_active_interfaces_atomic( - sc->sec_wiphy[i]->hw, ath9k_vif_iter, iter_data); - } - spin_unlock_bh(&sc->wiphy_lock); -} + /* Set the VIF opmode */ + avp->av_opmode = ic_opmode; + avp->av_bslot = -1; -/* Called with sc->mutex held. */ -static void ath9k_calculate_summary_state(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_vif_iter_data iter_data; + sc->nvifs++; - ath9k_calculate_iter_data(hw, vif, &iter_data); + ath9k_set_bssid_mask(hw, vif); - /* Set BSSID mask. */ - memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); - ath_hw_setbssidmask(common); + if (sc->nvifs > 1) + goto out; /* skip global settings for secondary vif */ - /* Set op-mode & TSF */ - if (iter_data.naps > 0) { + if (ic_opmode == NL80211_IFTYPE_AP) { ath9k_hw_set_tsfadjust(ah, 1); sc->sc_flags |= SC_OP_TSF_RESET; - ah->opmode = NL80211_IFTYPE_AP; - } else { - ath9k_hw_set_tsfadjust(ah, 0); - sc->sc_flags &= ~SC_OP_TSF_RESET; - - if (iter_data.nwds + iter_data.nmeshes) - ah->opmode = NL80211_IFTYPE_AP; - else if (iter_data.nadhocs) - ah->opmode = NL80211_IFTYPE_ADHOC; - else - ah->opmode = NL80211_IFTYPE_STATION; } + /* Set the device opmode */ + ah->opmode = ic_opmode; + /* * Enable MIB interrupts when there are hardware phy counters. + * Note we only do this (at the moment) for station mode. */ - if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) { + if ((vif->type == NL80211_IFTYPE_STATION) || + (vif->type == NL80211_IFTYPE_ADHOC) || + (vif->type == NL80211_IFTYPE_MESH_POINT)) { if (ah->config.enable_ani) ah->imask |= ATH9K_INT_MIB; ah->imask |= ATH9K_INT_TSFOOR; - } else { - ah->imask &= ~ATH9K_INT_MIB; - ah->imask &= ~ATH9K_INT_TSFOOR; } ath9k_hw_set_interrupts(ah, ah->imask); - /* Set up ANI */ - if ((iter_data.naps + iter_data.nadhocs) > 0) { + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { sc->sc_flags |= SC_OP_ANI_RUN; ath_start_ani(common); - } else { - sc->sc_flags &= ~SC_OP_ANI_RUN; - del_timer_sync(&common->ani.timer); } -} -/* Called with sc->mutex held, vif counts set up properly. */ -static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; - - ath9k_calculate_summary_state(hw, vif); - - if (ath9k_uses_beacons(vif->type)) { - int error; - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); - /* This may fail because upper levels do not have beacons - * properly configured yet. That's OK, we assume it - * will be properly configured and then we will be notified - * in the info_changed method and set up beacons properly - * there. 
- */ - error = ath_beacon_alloc(aphy, vif); - if (error) - ath9k_reclaim_beacon(sc, vif); - else - ath_beacon_config(sc, vif); - } +out: + mutex_unlock(&sc->mutex); + return ret; } - -static int ath9k_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void ath9k_reclaim_beacon(struct ath_softc *sc, + struct ieee80211_vif *vif) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; - int ret = 0; - mutex_lock(&sc->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_WDS: - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_MESH_POINT: - break; - default: - ath_err(common, "Interface type %d not yet supported\n", - vif->type); - ret = -EOPNOTSUPP; - goto out; - } + /* Disable SWBA interrupt */ + sc->sc_ah->imask &= ~ATH9K_INT_SWBA; + ath9k_ps_wakeup(sc); + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + tasklet_kill(&sc->bcon_tasklet); + ath9k_ps_restore(sc); - if (ath9k_uses_beacons(vif->type)) { - if (sc->nbcnvifs >= ATH_BCBUF) { - ath_err(common, "Not enough beacon buffers when adding" - " new interface of type: %i\n", - vif->type); - ret = -ENOBUFS; - goto out; - } - } + ath_beacon_return(sc, avp); + sc->sc_flags &= ~SC_OP_BEACONS; - if ((vif->type == NL80211_IFTYPE_ADHOC) && - sc->nvifs > 0) { - ath_err(common, "Cannot create ADHOC interface when other" - " interfaces already exist.\n"); - ret = -EINVAL; - goto out; + if (sc->nbcnvifs > 0) { + /* Re-enable beaconing */ + sc->sc_ah->imask |= ATH9K_INT_SWBA; + ath9k_ps_wakeup(sc); + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); + ath9k_ps_restore(sc); } - - ath_dbg(common, ATH_DBG_CONFIG, - "Attach a VIF of type: %d\n", vif->type); - - /* Set the VIF opmode */ - avp->av_opmode = vif->type; - avp->av_bslot = -1; - - sc->nvifs++; - - ath9k_do_vif_add_setup(hw, vif); -out: - mutex_unlock(&sc->mutex); - return ret; } static int ath9k_change_interface(struct ieee80211_hw *hw, @@ -1625,33 +1462,32 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); mutex_lock(&sc->mutex); - /* See if new interface type is valid. 
*/ - if ((new_type == NL80211_IFTYPE_ADHOC) && - (sc->nvifs > 1)) { - ath_err(common, "When using ADHOC, it must be the only" - " interface.\n"); - ret = -EINVAL; - goto out; - } - - if (ath9k_uses_beacons(new_type) && - !ath9k_uses_beacons(vif->type)) { + switch (new_type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: if (sc->nbcnvifs >= ATH_BCBUF) { ath_err(common, "No beacon slot available\n"); ret = -ENOBUFS; goto out; } + break; + case NL80211_IFTYPE_STATION: + /* Stop ANI */ + sc->sc_flags &= ~SC_OP_ANI_RUN; + del_timer_sync(&common->ani.timer); + if ((vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_ADHOC)) + ath9k_reclaim_beacon(sc, vif); + break; + default: + ath_err(common, "Interface type %d not yet supported\n", + vif->type); + ret = -ENOTSUPP; + goto out; } - - /* Clean up old vif stuff */ - if (ath9k_uses_beacons(vif->type)) - ath9k_reclaim_beacon(sc, vif); - - /* Add new settings */ vif->type = new_type; vif->p2p = p2p; - ath9k_do_vif_add_setup(hw, vif); out: mutex_unlock(&sc->mutex); return ret; @@ -1668,13 +1504,17 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); - sc->nvifs--; + /* Stop ANI */ + sc->sc_flags &= ~SC_OP_ANI_RUN; + del_timer_sync(&common->ani.timer); /* Reclaim beacon resources */ - if (ath9k_uses_beacons(vif->type)) + if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || + (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) ath9k_reclaim_beacon(sc, vif); - ath9k_calculate_summary_state(hw, NULL); + sc->nvifs--; mutex_unlock(&sc->mutex); } @@ -1861,9 +1701,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) skip_chan_change: if (changed & IEEE80211_CONF_CHANGE_POWER) { sc->config.txpowlimit = 2 * conf->power_level; - ath9k_ps_wakeup(sc); ath_update_txpow(sc); - ath9k_ps_restore(sc); } spin_lock_bh(&sc->wiphy_lock); @@ -2180,7 +2018,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, - u16 tid, u16 *ssn, u8 buf_size) + u16 tid, u16 *ssn) { struct ath_wiphy *aphy = hw->priv; struct ath_softc *sc = aphy->sc; diff --git a/trunk/drivers/net/wireless/ath/ath9k/recv.c b/trunk/drivers/net/wireless/ath/ath9k/recv.c index 116f0582af24..b2497b8601e5 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/recv.c +++ b/trunk/drivers/net/wireless/ath/ath9k/recv.c @@ -588,14 +588,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) return; mgmt = (struct ieee80211_mgmt *)skb->data; - if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) { - /* TODO: This doesn't work well if you have stations - * associated to two different APs because curbssid - * is just the last AP that any of the stations associated - * with. - */ + if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) return; /* not from our current AP */ - } sc->ps_flags &= ~PS_WAIT_FOR_BEACON; @@ -990,14 +984,8 @@ static void ath9k_process_rssi(struct ath_common *common, fc = hdr->frame_control; if (!ieee80211_is_beacon(fc) || - compare_ether_addr(hdr->addr3, common->curbssid)) { - /* TODO: This doesn't work well if you have stations - * associated to two different APs because curbssid - * is just the last AP that any of the stations associated - * with. 
- */ + compare_ether_addr(hdr->addr3, common->curbssid)) return; - } if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi); diff --git a/trunk/drivers/net/wireless/ath/ath9k/virtual.c b/trunk/drivers/net/wireless/ath/ath9k/virtual.c index d205c66cd972..2dc7095e56d1 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/virtual.c +++ b/trunk/drivers/net/wireless/ath/ath9k/virtual.c @@ -18,6 +18,54 @@ #include "ath9k.h" +struct ath9k_vif_iter_data { + const u8 *hw_macaddr; + u8 mask[ETH_ALEN]; +}; + +static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath9k_vif_iter_data *iter_data = data; + int i; + + for (i = 0; i < ETH_ALEN; i++) + iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); +} + +void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath9k_vif_iter_data iter_data; + int i; + + /* + * Use the hardware MAC address as reference, the hardware uses it + * together with the BSSID mask when matching addresses. + */ + iter_data.hw_macaddr = common->macaddr; + memset(&iter_data.mask, 0xff, ETH_ALEN); + + if (vif) + ath9k_vif_iter(&iter_data, vif->addr, vif); + + /* Get list of all active MAC addresses */ + spin_lock_bh(&sc->wiphy_lock); + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter, + &iter_data); + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] == NULL) + continue; + ieee80211_iterate_active_interfaces_atomic( + sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data); + } + spin_unlock_bh(&sc->wiphy_lock); + + memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); + ath_hw_setbssidmask(common); +} + int ath9k_wiphy_add(struct ath_softc *sc) { int i, error; diff --git a/trunk/drivers/net/wireless/ath/ath9k/xmit.c b/trunk/drivers/net/wireless/ath/ath9k/xmit.c index dcac811ddab5..332d1feb5c18 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/xmit.c +++ b/trunk/drivers/net/wireless/ath/ath9k/xmit.c @@ -19,6 +19,7 @@ #define BITS_PER_BYTE 8 #define OFDM_PLCP_BITS 22 +#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f) #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) #define L_STF 8 #define L_LTF 8 @@ -31,6 +32,7 @@ #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) +#define OFDM_SIFS_TIME 16 static u16 bits_per_symbol[][2] = { /* 20MHz 40MHz */ @@ -167,7 +169,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) ath_tx_update_baw(sc, tid, fi->seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); } else { - ath_tx_send_normal(sc, txq, NULL, &bf_head); + ath_tx_send_normal(sc, txq, tid, &bf_head); } spin_lock_bh(&txq->axq_lock); } @@ -427,7 +429,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); while (bf) { - txfail = txpending = sendbar = 0; + txfail = txpending = 0; bf_next = bf->bf_next; skb = bf->bf_mpdu; @@ -854,10 +856,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, txtid->state |= AGGR_ADDBA_PROGRESS; txtid->paused = true; - *ssn = txtid->seq_start = txtid->seq_next; - - memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); - txtid->baw_head = txtid->baw_tail = 0; + *ssn = txtid->seq_start; return 0; } @@ -943,7 +942,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 
[WME_AC_VI] = ATH_TXQ_AC_VI, [WME_AC_VO] = ATH_TXQ_AC_VO, }; - int axq_qnum, i; + int qnum, i; memset(&qi, 0, sizeof(qi)); qi.tqi_subtype = subtype_txq_to_hwq[subtype]; @@ -977,25 +976,24 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE; } - axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); - if (axq_qnum == -1) { + qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); + if (qnum == -1) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return NULL; } - if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) { + if (qnum >= ARRAY_SIZE(sc->tx.txq)) { ath_err(common, "qnum %u out of range, max %zu!\n", - axq_qnum, ARRAY_SIZE(sc->tx.txq)); - ath9k_hw_releasetxqueue(ah, axq_qnum); + qnum, ARRAY_SIZE(sc->tx.txq)); + ath9k_hw_releasetxqueue(ah, qnum); return NULL; } - if (!ATH_TXQ_SETUP(sc, axq_qnum)) { - struct ath_txq *txq = &sc->tx.txq[axq_qnum]; + if (!ATH_TXQ_SETUP(sc, qnum)) { + struct ath_txq *txq = &sc->tx.txq[qnum]; - txq->axq_qnum = axq_qnum; - txq->mac80211_qnum = -1; + txq->axq_qnum = qnum; txq->axq_link = NULL; INIT_LIST_HEAD(&txq->axq_q); INIT_LIST_HEAD(&txq->axq_acq); @@ -1003,14 +1001,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) txq->axq_depth = 0; txq->axq_ampdu_depth = 0; txq->axq_tx_inprogress = false; - sc->tx.txqsetup |= 1<<axq_qnum; + sc->tx.txqsetup |= 1<<qnum; txq->txq_headidx = txq->txq_tailidx = 0; for (i = 0; i < ATH_TXFIFO_DEPTH; i++) INIT_LIST_HEAD(&txq->txq_fifo[i]); INIT_LIST_HEAD(&txq->txq_fifo_pending); } - return &sc->tx.txq[axq_qnum]; + return &sc->tx.txq[qnum]; } int ath_txq_update(struct ath_softc *sc, int qnum, @@ -1220,59 +1218,46 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) sc->tx.txqsetup &= ~(1<<txq->axq_qnum); } -/* For each axq_acq entry, for each tid, try to schedule packets - * for transmit until ampdu_depth has reached min Q depth.
- */ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { - struct ath_atx_ac *ac, *ac_tmp, *last_ac; - struct ath_atx_tid *tid, *last_tid; + struct ath_atx_ac *ac; + struct ath_atx_tid *tid; - if (list_empty(&txq->axq_acq) || - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) + if (list_empty(&txq->axq_acq)) return; ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); - last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); + list_del(&ac->list); + ac->sched = false; - list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { - last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); - list_del(&ac->list); - ac->sched = false; + do { + if (list_empty(&ac->tid_q)) + return; - while (!list_empty(&ac->tid_q)) { - tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, - list); - list_del(&tid->list); - tid->sched = false; + tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); + list_del(&tid->list); + tid->sched = false; - if (tid->paused) - continue; + if (tid->paused) + continue; - ath_tx_sched_aggr(sc, txq, tid); + ath_tx_sched_aggr(sc, txq, tid); - /* - * add tid to round-robin queue if more frames - * are pending for the tid - */ - if (!list_empty(&tid->buf_q)) - ath_tx_queue_tid(txq, tid); + /* + * add tid to round-robin queue if more frames + * are pending for the tid + */ + if (!list_empty(&tid->buf_q)) + ath_tx_queue_tid(txq, tid); - if (tid == last_tid || - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) - break; - } + break; + } while (!list_empty(&ac->tid_q)); - if (!list_empty(&ac->tid_q)) { - if (!ac->sched) { - ac->sched = true; - list_add_tail(&ac->list, &txq->axq_acq); - } + if (!list_empty(&ac->tid_q)) { + if (!ac->sched) { + ac->sched = true; + list_add_tail(&ac->list, &txq->axq_acq); } - - if (ac == last_ac || - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) - return; } } @@ -1316,7 +1301,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]); list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]); INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); - TX_STAT_INC(txq->axq_qnum, puttxbuf); ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); @@ -1324,7 +1308,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, list_splice_tail_init(head, &txq->axq_q); if (txq->axq_link == NULL) { - TX_STAT_INC(txq->axq_qnum, puttxbuf); ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", txq->axq_qnum, ito64(bf->bf_daddr), @@ -1338,7 +1321,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, } ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, &txq->axq_link); - TX_STAT_INC(txq->axq_qnum, txstart); ath9k_hw_txstart(ah, txq->axq_qnum); } txq->axq_depth++; @@ -1353,6 +1335,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, struct list_head bf_head; bf->bf_state.bf_type |= BUF_AMPDU; + TX_STAT_INC(txctl->txq->axq_qnum, a_queued); /* * Do not queue to h/w when any of the following conditions is true: @@ -1368,7 +1351,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, * Add this frame to software queue for scheduling later * for aggregation. 
*/ - TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); list_add_tail(&bf->list, &tid->buf_q); ath_tx_queue_tid(txctl->txq, tid); return; @@ -1382,7 +1364,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, ath_tx_addto_baw(sc, tid, fi->seqno); /* Queue to h/w without aggregation */ - TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); bf->bf_lastbf = bf; ath_buf_set_rate(sc, bf, fi->framelen); ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); @@ -1985,16 +1966,17 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; } -/* Has no locking. Must hold spin_lock_bh(&txq->axq_lock) - * before calling this. - */ -static void __ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) +static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum) { - if (txq->mac80211_qnum >= 0 && - txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) { - if (ath_mac80211_start_queue(sc, txq->mac80211_qnum)) + struct ath_txq *txq; + + txq = sc->tx.txq_map[qnum]; + spin_lock_bh(&txq->axq_lock); + if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) { + if (ath_mac80211_start_queue(sc, qnum)) txq->stopped = 0; } + spin_unlock_bh(&txq->axq_lock); } static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) @@ -2017,8 +1999,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) spin_lock_bh(&txq->axq_lock); if (list_empty(&txq->axq_q)) { txq->axq_link = NULL; - if (sc->sc_flags & SC_OP_TXAGGR) - ath_txq_schedule(sc, txq); spin_unlock_bh(&txq->axq_lock); break; } @@ -2053,7 +2033,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) spin_unlock_bh(&txq->axq_lock); break; } - TX_STAT_INC(txq->axq_qnum, txprocdesc); /* * Remove ath_buf's of the same transmit unit from txq, @@ -2097,9 +2076,10 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) else ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); - spin_lock_bh(&txq->axq_lock); - __ath_wake_mac80211_queue(sc, txq); + if (txq == sc->tx.txq_map[qnum]) + ath_wake_mac80211_queue(sc, qnum); + spin_lock_bh(&txq->axq_lock); if (sc->sc_flags & SC_OP_TXAGGR) ath_txq_schedule(sc, txq); spin_unlock_bh(&txq->axq_lock); @@ -2113,9 +2093,6 @@ static void ath_tx_complete_poll_work(struct work_struct *work) struct ath_txq *txq; int i; bool needreset = false; -#ifdef CONFIG_ATH9K_DEBUGFS - sc->tx_complete_poll_work_seen++; -#endif for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) { @@ -2129,34 +2106,6 @@ static void ath_tx_complete_poll_work(struct work_struct *work) } else { txq->axq_tx_inprogress = true; } - } else { - /* If the queue has pending buffers, then it - * should be doing tx work (and have axq_depth). - * Shouldn't get to this state I think..but - * we do. 
- */ - if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) && - (txq->pending_frames > 0 || - !list_empty(&txq->axq_acq) || - txq->stopped)) { - ath_err(ath9k_hw_common(sc->sc_ah), - "txq: %p axq_qnum: %u," - " mac80211_qnum: %i" - " axq_link: %p" - " pending frames: %i" - " axq_acq empty: %i" - " stopped: %i" - " axq_depth: 0 Attempting to" - " restart tx logic.\n", - txq, txq->axq_qnum, - txq->mac80211_qnum, - txq->axq_link, - txq->pending_frames, - list_empty(&txq->axq_acq), - txq->stopped); - __ath_wake_mac80211_queue(sc, txq); - ath_txq_schedule(sc, txq); - } } spin_unlock_bh(&txq->axq_lock); } @@ -2164,7 +2113,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work) if (needreset) { ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, "tx hung, resetting the chip\n"); + ath9k_ps_wakeup(sc); ath_reset(sc, true); + ath9k_ps_restore(sc); } ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, @@ -2251,9 +2202,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) ath_tx_complete_buf(sc, bf, txq, &bf_head, &txs, txok, 0); - spin_lock_bh(&txq->axq_lock); - __ath_wake_mac80211_queue(sc, txq); + if (txq == sc->tx.txq_map[qnum]) + ath_wake_mac80211_queue(sc, qnum); + spin_lock_bh(&txq->axq_lock); if (!list_empty(&txq->txq_fifo_pending)) { INIT_LIST_HEAD(&bf_head); bf = list_first_entry(&txq->txq_fifo_pending, diff --git a/trunk/drivers/net/wireless/ath/carl9170/carl9170.h b/trunk/drivers/net/wireless/ath/carl9170/carl9170.h index 420d437f9580..d07ff7f2fd92 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/trunk/drivers/net/wireless/ath/carl9170/carl9170.h @@ -283,7 +283,6 @@ struct ar9170 { unsigned int mem_blocks; unsigned int mem_block_size; unsigned int rx_size; - unsigned int tx_seq_table; } fw; /* reset / stuck frames/queue detection */ diff --git a/trunk/drivers/net/wireless/ath/carl9170/fw.c b/trunk/drivers/net/wireless/ath/carl9170/fw.c index 9517ede9e2df..546b4e4ec5ea 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/fw.c +++ b/trunk/drivers/net/wireless/ath/carl9170/fw.c @@ -150,7 +150,6 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) const struct carl9170fw_otus_desc *otus_desc; const struct carl9170fw_chk_desc *chk_desc; const struct carl9170fw_last_desc *last_desc; - const struct carl9170fw_txsq_desc *txsq_desc; last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); @@ -265,9 +264,6 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) FIF_PROMISC_IN_BSS; } - if (SUPP(CARL9170FW_WOL)) - device_set_wakeup_enable(&ar->udev->dev, true); - ar->fw.vif_num = otus_desc->vif_num; ar->fw.cmd_bufs = otus_desc->cmd_bufs; ar->fw.address = le32_to_cpu(otus_desc->fw_address); @@ -300,17 +296,6 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) } } - txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, - sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); - - if (txsq_desc) { - ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); - if (!valid_cpu_addr(ar->fw.tx_seq_table)) - return -EINVAL; - } else { - ar->fw.tx_seq_table = 0; - } - #undef SUPPORTED return 0; } diff --git a/trunk/drivers/net/wireless/ath/carl9170/fwcmd.h b/trunk/drivers/net/wireless/ath/carl9170/fwcmd.h index 30449d21b762..3680dfc70f46 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/fwcmd.h +++ b/trunk/drivers/net/wireless/ath/carl9170/fwcmd.h @@ -167,7 +167,6 @@ struct carl9170_rx_filter_cmd { #define CARL9170_RX_FILTER_CTL_BACKR 0x20 #define CARL9170_RX_FILTER_MGMT 0x40 
#define CARL9170_RX_FILTER_DATA 0x80 -#define CARL9170_RX_FILTER_EVERYTHING (~0) struct carl9170_bcn_ctrl_cmd { __le32 vif_id; diff --git a/trunk/drivers/net/wireless/ath/carl9170/fwdesc.h b/trunk/drivers/net/wireless/ath/carl9170/fwdesc.h index 921066822dd5..71f3821f6058 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/fwdesc.h +++ b/trunk/drivers/net/wireless/ath/carl9170/fwdesc.h @@ -69,9 +69,6 @@ enum carl9170fw_feature_list { /* Firmware RX filter | CARL9170_CMD_RX_FILTER */ CARL9170FW_RX_FILTER, - /* Wake up on WLAN */ - CARL9170FW_WOL, - /* KEEP LAST */ __CARL9170FW_FEATURE_NUM }; @@ -81,7 +78,6 @@ enum carl9170fw_feature_list { #define FIX_MAGIC "FIX\0" #define DBG_MAGIC "DBG\0" #define CHK_MAGIC "CHK\0" -#define TXSQ_MAGIC "TXSQ" #define LAST_MAGIC "LAST" #define CARL9170FW_SET_DAY(d) (((d) - 1) % 31) @@ -92,10 +88,8 @@ enum carl9170fw_feature_list { #define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1) #define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10) -#define CARL9170FW_MAGIC_SIZE 4 - struct carl9170fw_desc_head { - u8 magic[CARL9170FW_MAGIC_SIZE]; + u8 magic[4]; __le16 length; u8 min_ver; u8 cur_ver; @@ -176,16 +170,6 @@ struct carl9170fw_chk_desc { #define CARL9170FW_CHK_DESC_SIZE \ (sizeof(struct carl9170fw_chk_desc)) -#define CARL9170FW_TXSQ_DESC_MIN_VER 1 -#define CARL9170FW_TXSQ_DESC_CUR_VER 1 -struct carl9170fw_txsq_desc { - struct carl9170fw_desc_head head; - - __le32 seq_table_addr; -} __packed; -#define CARL9170FW_TXSQ_DESC_SIZE \ - (sizeof(struct carl9170fw_txsq_desc)) - #define CARL9170FW_LAST_DESC_MIN_VER 1 #define CARL9170FW_LAST_DESC_CUR_VER 2 struct carl9170fw_last_desc { @@ -205,8 +189,8 @@ struct carl9170fw_last_desc { } static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, - u8 magic[CARL9170FW_MAGIC_SIZE], - __le16 length, u8 min_ver, u8 cur_ver) + u8 magic[4], __le16 length, + u8 min_ver, u8 cur_ver) { head->magic[0] = magic[0]; head->magic[1] = magic[1]; @@ -220,7 +204,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, #define carl9170fw_for_each_hdr(desc, fw_desc) \ for (desc = fw_desc; \ - memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \ + memcmp(desc->magic, LAST_MAGIC, 4) && \ le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \ le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \ desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length))) @@ -234,8 +218,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature) } static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head, - const u8 descid[CARL9170FW_MAGIC_SIZE], - u16 min_len, u8 compatible_revision) + const u8 descid[4], u16 min_len, + u8 compatible_revision) { if (descid[0] == head->magic[0] && descid[1] == head->magic[1] && descid[2] == head->magic[2] && descid[3] == head->magic[3] && diff --git a/trunk/drivers/net/wireless/ath/carl9170/hw.h b/trunk/drivers/net/wireless/ath/carl9170/hw.h index 4e30762dd903..e85df6edfed3 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/hw.h +++ b/trunk/drivers/net/wireless/ath/carl9170/hw.h @@ -463,8 +463,6 @@ #define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010) #define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014) -#define AR9170_PWR_PLL_ADDAC_DIV_S 2 -#define AR9170_PWR_PLL_ADDAC_DIV 0xffc #define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020) /* Faraday USB Controller */ @@ -473,9 +471,6 @@ #define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000) #define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0) #define 
AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2) -#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3) -#define AR9170_USB_MAIN_CTRL_RESET BIT(4) -#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5) #define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6) #define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001) @@ -504,13 +499,6 @@ #define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020) #define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021) -#define AR9170_USB_INTR_SRC0_SETUP BIT(0) -#define AR9170_USB_INTR_SRC0_IN BIT(1) -#define AR9170_USB_INTR_SRC0_OUT BIT(2) -#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */ -#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */ -#define AR9170_USB_INTR_SRC0_ABORT BIT(7) - #define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022) #define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023) #define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024) @@ -518,15 +506,6 @@ #define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026) #define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027) #define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028) -#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1) -#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2) -#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3) -#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4) -#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5) -#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6) -#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7) - -#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f) #define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030) #define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030) @@ -602,10 +581,6 @@ #define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110) #define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114) - -#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120) -#define AR9170_USB_WAKE_UP_WAKE BIT(0) - #define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0) #define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1)) diff --git a/trunk/drivers/net/wireless/ath/carl9170/main.c b/trunk/drivers/net/wireless/ath/carl9170/main.c index ede3d7e5a048..870df8c42622 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/main.c +++ b/trunk/drivers/net/wireless/ath/carl9170/main.c @@ -662,13 +662,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, goto unlock; } - if (ar->fw.tx_seq_table) { - err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4, - 0); - if (err) - goto unlock; - } - unlock: if (err && (vif_id >= 0)) { vif_priv->active = false; @@ -1286,7 +1279,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, - u16 tid, u16 *ssn, u8 buf_size) + u16 tid, u16 *ssn) { struct ar9170 *ar = hw->priv; struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; diff --git a/trunk/drivers/net/wireless/ath/carl9170/tx.c b/trunk/drivers/net/wireless/ath/carl9170/tx.c index 6f41e21d3a1c..6cc58e052d10 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/tx.c +++ b/trunk/drivers/net/wireless/ath/carl9170/tx.c @@ -862,9 +862,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)) txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB; - if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) - txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ; - if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) txc->s.misc |= 
CARL9170_TX_SUPER_MISC_FILL_IN_TSF; diff --git a/trunk/drivers/net/wireless/ath/carl9170/version.h b/trunk/drivers/net/wireless/ath/carl9170/version.h index 15095c035169..ee0f84f2a2f6 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/version.h +++ b/trunk/drivers/net/wireless/ath/carl9170/version.h @@ -1,7 +1,7 @@ #ifndef __CARL9170_SHARED_VERSION_H #define __CARL9170_SHARED_VERSION_H -#define CARL9170FW_VERSION_YEAR 11 -#define CARL9170FW_VERSION_MONTH 1 -#define CARL9170FW_VERSION_DAY 22 -#define CARL9170FW_VERSION_GIT "1.9.2" +#define CARL9170FW_VERSION_YEAR 10 +#define CARL9170FW_VERSION_MONTH 10 +#define CARL9170FW_VERSION_DAY 29 +#define CARL9170FW_VERSION_GIT "1.9.0" #endif /* __CARL9170_SHARED_VERSION_H */ diff --git a/trunk/drivers/net/wireless/ath/carl9170/wlan.h b/trunk/drivers/net/wireless/ath/carl9170/wlan.h index 9e1324b67e08..24d63b583b6b 100644 --- a/trunk/drivers/net/wireless/ath/carl9170/wlan.h +++ b/trunk/drivers/net/wireless/ath/carl9170/wlan.h @@ -251,7 +251,7 @@ struct carl9170_tx_superdesc { u8 ampdu_commit_factor:1; u8 ampdu_unused_bit:1; u8 queue:2; - u8 assign_seq:1; + u8 reserved:1; u8 vif_id:3; u8 fill_in_tsf:1; u8 cab:1; @@ -299,7 +299,6 @@ struct _ar9170_tx_hwdesc { #define CARL9170_TX_SUPER_MISC_QUEUE 0x3 #define CARL9170_TX_SUPER_MISC_QUEUE_S 0 -#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4 #define CARL9170_TX_SUPER_MISC_VIF_ID 0x38 #define CARL9170_TX_SUPER_MISC_VIF_ID_S 3 #define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40 @@ -414,23 +413,6 @@ enum ar9170_txq { __AR9170_NUM_TXQ, }; -/* - * This is an workaround for several undocumented bugs. - * Don't mess with the QoS/AC <-> HW Queue map, if you don't - * know what you are doing. - * - * Known problems [hardware]: - * * The MAC does not aggregate frames on anything other - * than the first HW queue. - * * when an AMPDU is placed [in the first hw queue] and - * additional frames are already queued on a different - * hw queue, the MAC will ALWAYS freeze. - * - * In a nutshell: The hardware can either do QoS or - * Aggregation but not both at the same time. As a - * result, this makes the device pretty much useless - * for any serious 802.11n setup. - */ static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 }; #define AR9170_TXQ_DEPTH 32 diff --git a/trunk/drivers/net/wireless/iwlwifi/Kconfig b/trunk/drivers/net/wireless/iwlwifi/Kconfig index e1e3b1cf3cff..ed424574160e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/Kconfig +++ b/trunk/drivers/net/wireless/iwlwifi/Kconfig @@ -2,10 +2,6 @@ config IWLWIFI tristate "Intel Wireless Wifi" depends on PCI && MAC80211 select FW_LOADER - select NEW_LEDS - select LEDS_CLASS - select LEDS_TRIGGERS - select MAC80211_LEDS menu "Debugging Options" depends on IWLWIFI @@ -110,27 +106,9 @@ config IWL5000 Intel WiFi Link 1000BGN Intel Wireless WiFi 5150AGN Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN - Intel 6005 Series Wi-Fi Adapters - Intel 6030 Series Wi-Fi Adapters - Intel Wireless WiFi Link 6150BGN 2 Adapter + Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B) + Intel WIreless WiFi Link 6050BGN Gen 2 Adapter Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) - Intel 2000 Series Wi-Fi Adapters - -config IWL_P2P - bool "iwlwifi experimental P2P support" - depends on IWL5000 - help - This option enables experimental P2P support for some devices - based on microcode support. Since P2P support is still under - development, this option may even enable it for some devices - now that turn out to not support it in the future due to - microcode restrictions. 
- - To determine if your microcode supports the experimental P2P - offered by this option, check if the driver advertises AP - support when it is loaded. - - Say Y only if you want to experiment with P2P. config IWL3945 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" diff --git a/trunk/drivers/net/wireless/iwlwifi/Makefile b/trunk/drivers/net/wireless/iwlwifi/Makefile index 25be742c69c9..93380f97835f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/Makefile +++ b/trunk/drivers/net/wireless/iwlwifi/Makefile @@ -26,7 +26,6 @@ iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o iwlagn-$(CONFIG_IWL5000) += iwl-5000.o iwlagn-$(CONFIG_IWL5000) += iwl-6000.o iwlagn-$(CONFIG_IWL5000) += iwl-1000.o -iwlagn-$(CONFIG_IWL5000) += iwl-2000.o # 3945 obj-$(CONFIG_IWL3945) += iwl3945.o diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c index 127723e6319f..ba78bc8a259f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -270,7 +270,6 @@ static struct iwl_base_params iwl1000_base_params = { .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, - .supports_idle = true, }; static struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-2000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-2000.c deleted file mode 100644 index 3c9e1b5724c7..000000000000 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-2000.c +++ /dev/null @@ -1,556 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "iwl-eeprom.h" -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-sta.h" -#include "iwl-agn.h" -#include "iwl-helpers.h" -#include "iwl-agn-hw.h" -#include "iwl-6000-hw.h" -#include "iwl-agn-led.h" -#include "iwl-agn-debugfs.h" - -/* Highest firmware API version supported */ -#define IWL2030_UCODE_API_MAX 5 -#define IWL2000_UCODE_API_MAX 5 -#define IWL200_UCODE_API_MAX 5 - -/* Lowest firmware API version supported */ -#define IWL2030_UCODE_API_MIN 5 -#define IWL2000_UCODE_API_MIN 5 -#define IWL200_UCODE_API_MIN 5 - -#define IWL2030_FW_PRE "iwlwifi-2030-" -#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode" -#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api) - -#define IWL2000_FW_PRE "iwlwifi-2000-" -#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode" -#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api) - -#define IWL200_FW_PRE "iwlwifi-200-" -#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode" -#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api) - -static void iwl2000_set_ct_threshold(struct iwl_priv *priv) -{ - /* want Celsius */ - priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; - priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; -} - -/* NIC configuration for 2000 series */ -static void iwl2000_nic_config(struct iwl_priv *priv) -{ - u16 radio_cfg; - - radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); - - /* write radio config values to register */ - if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | - EEPROM_RF_CFG_STEP_MSK(radio_cfg) | - EEPROM_RF_CFG_DASH_MSK(radio_cfg)); - - /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); - -} - -static struct iwl_sensitivity_ranges iwl2000_sensitivity = { - .min_nrg_cck = 97, - .max_nrg_cck = 0, /* not used, set to 0 */ - .auto_corr_min_ofdm = 80, - .auto_corr_min_ofdm_mrc = 128, - .auto_corr_min_ofdm_x1 = 105, - .auto_corr_min_ofdm_mrc_x1 = 192, - - .auto_corr_max_ofdm = 145, - .auto_corr_max_ofdm_mrc = 232, - .auto_corr_max_ofdm_x1 = 110, - .auto_corr_max_ofdm_mrc_x1 = 232, - - .auto_corr_min_cck = 125, - .auto_corr_max_cck = 175, - .auto_corr_min_cck_mrc = 160, - .auto_corr_max_cck_mrc = 310, - .nrg_th_cck = 97, - .nrg_th_ofdm = 100, - - .barker_corr_th_min = 190, - .barker_corr_th_min_mrc = 390, - .nrg_th_cca = 62, -}; - -static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) -{ - if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && - priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) - priv->cfg->base_params->num_of_queues = - priv->cfg->mod_params->num_of_queues; - - priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; - priv->hw_params.scd_bc_tbls_size = - priv->cfg->base_params->num_of_queues * - sizeof(struct iwlagn_scd_bc_tbl); - priv->hw_params.tfd_size = sizeof(struct iwl_tfd); - priv->hw_params.max_stations = IWLAGN_STATION_COUNT; - priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; - - priv->hw_params.max_data_size = 
IWL60_RTC_DATA_SIZE; - priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; - - priv->hw_params.max_bsm_size = 0; - priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | - BIT(IEEE80211_BAND_5GHZ); - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; - - priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - if (priv->cfg->rx_with_siso_diversity) - priv->hw_params.rx_chains_num = 1; - else - priv->hw_params.rx_chains_num = - num_of_ant(priv->cfg->valid_rx_ant); - priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; - priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; - - iwl2000_set_ct_threshold(priv); - - /* Set initial sensitivity parameters */ - /* Set initial calibration set */ - priv->hw_params.sens = &iwl2000_sensitivity; - priv->hw_params.calib_init_cfg = - BIT(IWL_CALIB_XTAL) | - BIT(IWL_CALIB_LO) | - BIT(IWL_CALIB_TX_IQ) | - BIT(IWL_CALIB_BASE_BAND); - if (priv->cfg->need_dc_calib) - priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX); - if (priv->cfg->need_temp_offset_calib) - priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); - - priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; - - return 0; -} - -static int iwl2030_hw_channel_switch(struct iwl_priv *priv, - struct ieee80211_channel_switch *ch_switch) -{ - /* - * MULTI-FIXME - * See iwl_mac_channel_switch. - */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl6000_channel_switch_cmd cmd; - const struct iwl_channel_info *ch_info; - u32 switch_time_in_usec, ucode_switch_time; - u16 ch; - u32 tsf_low; - u8 switch_count; - u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); - struct ieee80211_vif *vif = ctx->vif; - struct iwl_host_cmd hcmd = { - .id = REPLY_CHANNEL_SWITCH, - .len = sizeof(cmd), - .flags = CMD_SYNC, - .data = &cmd, - }; - - cmd.band = priv->band == IEEE80211_BAND_2GHZ; - ch = ch_switch->channel->hw_value; - IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", - ctx->active.channel, ch); - cmd.channel = cpu_to_le16(ch); - cmd.rxon_flags = ctx->staging.flags; - cmd.rxon_filter_flags = ctx->staging.filter_flags; - switch_count = ch_switch->count; - tsf_low = ch_switch->timestamp & 0x0ffffffff; - /* - * calculate the ucode channel switch time - * adding TSF as one of the factor for when to switch - */ - if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { - if (switch_count > ((priv->ucode_beacon_time - tsf_low) / - beacon_interval)) { - switch_count -= (priv->ucode_beacon_time - - tsf_low) / beacon_interval; - } else - switch_count = 0; - } - if (switch_count <= 1) - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); - else { - switch_time_in_usec = - vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = iwl_usecs_to_beacons(priv, - switch_time_in_usec, - beacon_interval); - cmd.switch_time = iwl_add_beacon_time(priv, - priv->ucode_beacon_time, - ucode_switch_time, - beacon_interval); - } - IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", - cmd.switch_time); - ch_info = iwl_get_channel_info(priv, priv->band, ch); - if (ch_info) - cmd.expect_beacon = is_channel_radar(ch_info); - else { - IWL_ERR(priv, "invalid channel switch from %u to %u\n", - ctx->active.channel, ch); - return -EFAULT; - } - priv->switch_rxon.channel = cmd.channel; - priv->switch_rxon.switch_in_progress = true; - - return iwl_send_cmd_sync(priv, &hcmd); -} - -static struct iwl_lib_ops iwl2000_lib = { - .set_hw_params = iwl2000_hw_set_hw_params, - .txq_update_byte_cnt_tbl = 
iwlagn_txq_update_byte_cnt_tbl, - .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_agg_enable = iwlagn_txq_agg_enable, - .txq_agg_disable = iwlagn_txq_agg_disable, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, - .rx_handler_setup = iwlagn_rx_handler_setup, - .setup_deferred_work = iwlagn_setup_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .load_ucode = iwlagn_load_ucode, - .dump_nic_event_log = iwl_dump_nic_event_log, - .dump_nic_error_log = iwl_dump_nic_error_log, - .dump_csr = iwl_dump_csr, - .dump_fh = iwl_dump_fh, - .init_alive_start = iwlagn_init_alive_start, - .alive_notify = iwlagn_alive_notify, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, - .set_channel_switch = iwl2030_hw_channel_switch, - .apm_ops = { - .init = iwl_apm_init, - .config = iwl2000_nic_config, - }, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, - .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, - .release_semaphore = iwlcore_eeprom_release_semaphore, - .calib_version = iwlagn_eeprom_calib_version, - .query_addr = iwlagn_eeprom_query_addr, - .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, - }, - .isr_ops = { - .isr = iwl_isr_ict, - .free = iwl_free_isr_ict, - .alloc = iwl_alloc_isr_ict, - .reset = iwl_reset_ict, - .disable = iwl_disable_ict, - }, - .temp_ops = { - .temperature = iwlagn_temperature, - }, - .debugfs_ops = { - .rx_stats_read = iwl_ucode_rx_stats_read, - .tx_stats_read = iwl_ucode_tx_stats_read, - .general_stats_read = iwl_ucode_general_stats_read, - .bt_stats_read = iwl_ucode_bt_stats_read, - .reply_tx_error = iwl_reply_tx_error_read, - }, - .check_plcp_health = iwl_good_plcp_health, - .check_ack_health = iwl_good_ack_health, - .txfifo_flush = iwlagn_txfifo_flush, - .dev_txfifo_flush = iwlagn_dev_txfifo_flush, - .tt_ops = { - .lower_power_detection = iwl_tt_is_low_power_state, - .tt_power_mode = iwl_tt_current_power_mode, - .ct_kill_check = iwl_check_for_ct_kill, - } -}; - -static const struct iwl_ops iwl2000_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, - .led = &iwlagn_led_ops, - .ieee80211_ops = &iwlagn_hw_ops, -}; - -static const struct iwl_ops iwl2030_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_bt_hcmd, - .utils = &iwlagn_hcmd_utils, - .led = &iwlagn_led_ops, - .ieee80211_ops = &iwlagn_hw_ops, -}; - -static const struct iwl_ops iwl200_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, - .led = &iwlagn_led_ops, - .ieee80211_ops = &iwlagn_hw_ops, -}; - -static const struct iwl_ops iwl230_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_bt_hcmd, - .utils = &iwlagn_hcmd_utils, - .led = &iwlagn_led_ops, - .ieee80211_ops = &iwlagn_hw_ops, -}; - -static struct iwl_base_params iwl2000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, - .pll_cfg_val = 0, - .set_l0s = true, - .use_bsm = false, - .max_ll_items = OTP_MAX_LL_ITEMS_2x00, - .shadow_ram_support = true, - .led_compensation = 51, - .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, - .supports_idle = true, - .adv_thermal_throttle = 
true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .wd_timeout = IWL_DEF_WD_TIMEOUT, - .max_event_log_size = 512, - .ucode_tracing = true, - .sensitivity_calib_by_driver = true, - .chain_noise_calib_by_driver = true, - .shadow_reg_enable = true, -}; - - -static struct iwl_base_params iwl2030_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, - .num_of_queues = IWLAGN_NUM_QUEUES, - .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, - .pll_cfg_val = 0, - .set_l0s = true, - .use_bsm = false, - .max_ll_items = OTP_MAX_LL_ITEMS_2x00, - .shadow_ram_support = true, - .led_compensation = 57, - .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, - .supports_idle = true, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, - .wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .ucode_tracing = true, - .sensitivity_calib_by_driver = true, - .chain_noise_calib_by_driver = true, - .shadow_reg_enable = true, -}; - -static struct iwl_ht_params iwl2000_ht_params = { - .ht_greenfield_support = true, - .use_rts_for_aggregation = true, /* use rts/cts protection */ -}; - -static struct iwl_bt_params iwl2030_bt_params = { - .bt_statistics = true, - /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ - .advanced_bt_coexist = true, - .agg_time_limit = BT_AGG_THRESHOLD_DEF, - .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, - .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, - .bt_sco_disable = true, -}; - -#define IWL_DEVICE_2000 \ - .fw_name_pre = IWL2000_FW_PRE, \ - .ucode_api_max = IWL2000_UCODE_API_MAX, \ - .ucode_api_min = IWL2000_UCODE_API_MIN, \ - .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ - .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .ops = &iwl2000_ops, \ - .mod_params = &iwlagn_mod_params, \ - .base_params = &iwl2000_base_params, \ - .need_dc_calib = true, \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE \ - -struct iwl_cfg iwl2000_2bgn_cfg = { - .name = "2000 Series 2x2 BGN", - IWL_DEVICE_2000, - .ht_params = &iwl2000_ht_params, -}; - -struct iwl_cfg iwl2000_2bg_cfg = { - .name = "2000 Series 2x2 BG", - IWL_DEVICE_2000, -}; - -#define IWL_DEVICE_2030 \ - .fw_name_pre = IWL2030_FW_PRE, \ - .ucode_api_max = IWL2030_UCODE_API_MAX, \ - .ucode_api_min = IWL2030_UCODE_API_MIN, \ - .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ - .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .ops = &iwl2030_ops, \ - .mod_params = &iwlagn_mod_params, \ - .base_params = &iwl2030_base_params, \ - .bt_params = &iwl2030_bt_params, \ - .need_dc_calib = true, \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true \ - -struct iwl_cfg iwl2030_2bgn_cfg = { - .name = "2000 Series 2x2 BGN/BT", - IWL_DEVICE_2000, - .ht_params = &iwl2000_ht_params, -}; - -struct iwl_cfg iwl2030_2bg_cfg = { - .name = "2000 Series 2x2 BG/BT", - IWL_DEVICE_2000, -}; - -#define IWL_DEVICE_6035 \ - .fw_name_pre = IWL2030_FW_PRE, \ - .ucode_api_max = IWL2030_UCODE_API_MAX, \ - .ucode_api_min = IWL2030_UCODE_API_MIN, \ - .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \ - .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION, \ - .ops = &iwl2030_ops, \ - .mod_params = &iwlagn_mod_params, \ - .base_params = &iwl2030_base_params, \ - .bt_params = &iwl2030_bt_params, \ - .need_dc_calib = true, \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true \ - -struct iwl_cfg 
iwl6035_2agn_cfg = {
-	.name = "2000 Series 2x2 AGN/BT",
-	IWL_DEVICE_6035,
-	.ht_params = &iwl2000_ht_params,
-};
-
-struct iwl_cfg iwl6035_2abg_cfg = {
-	.name = "2000 Series 2x2 ABG/BT",
-	IWL_DEVICE_6035,
-};
-
-struct iwl_cfg iwl6035_2bg_cfg = {
-	.name = "2000 Series 2x2 BG/BT",
-	IWL_DEVICE_6035,
-};
-
-#define IWL_DEVICE_200 \
-	.fw_name_pre = IWL200_FW_PRE, \
-	.ucode_api_max = IWL200_UCODE_API_MAX, \
-	.ucode_api_min = IWL200_UCODE_API_MIN, \
-	.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
-	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
-	.ops = &iwl200_ops, \
-	.mod_params = &iwlagn_mod_params, \
-	.base_params = &iwl2000_base_params, \
-	.need_dc_calib = true, \
-	.need_temp_offset_calib = true, \
-	.led_mode = IWL_LED_RF_STATE, \
-	.adv_pm = true, \
-	.rx_with_siso_diversity = true \
-
-struct iwl_cfg iwl200_bg_cfg = {
-	.name = "200 Series 1x1 BG",
-	IWL_DEVICE_200,
-};
-
-struct iwl_cfg iwl200_bgn_cfg = {
-	.name = "200 Series 1x1 BGN",
-	IWL_DEVICE_200,
-	.ht_params = &iwl2000_ht_params,
-};
-
-#define IWL_DEVICE_230 \
-	.fw_name_pre = IWL200_FW_PRE, \
-	.ucode_api_max = IWL200_UCODE_API_MAX, \
-	.ucode_api_min = IWL200_UCODE_API_MIN, \
-	.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
-	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
-	.ops = &iwl230_ops, \
-	.mod_params = &iwlagn_mod_params, \
-	.base_params = &iwl2030_base_params, \
-	.bt_params = &iwl2030_bt_params, \
-	.need_dc_calib = true, \
-	.need_temp_offset_calib = true, \
-	.led_mode = IWL_LED_RF_STATE, \
-	.adv_pm = true, \
-	.rx_with_siso_diversity = true \
-
-struct iwl_cfg iwl230_bg_cfg = {
-	.name = "200 Series 1x1 BG/BT",
-	IWL_DEVICE_230,
-};
-
-struct iwl_cfg iwl230_bgn_cfg = {
-	.name = "200 Series 1x1 BGN/BT",
-	IWL_DEVICE_230,
-	.ht_params = &iwl2000_ht_params,
-};
-
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/trunk/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index dc7c3a4167a9..abe2b739c4dc 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -59,6 +59,33 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
 	return iwl_send_cmd(priv, &cmd);
 }
 
+/* Set led on command */
+static int iwl3945_led_on(struct iwl_priv *priv)
+{
+	struct iwl_led_cmd led_cmd = {
+		.id = IWL_LED_LINK,
+		.on = IWL_LED_SOLID,
+		.off = 0,
+		.interval = IWL_DEF_LED_INTRVL
+	};
+	return iwl3945_send_led_cmd(priv, &led_cmd);
+}
+
+/* Set led off command */
+static int iwl3945_led_off(struct iwl_priv *priv)
+{
+	struct iwl_led_cmd led_cmd = {
+		.id = IWL_LED_LINK,
+		.on = 0,
+		.off = 0,
+		.interval = IWL_DEF_LED_INTRVL
+	};
+	IWL_DEBUG_LED(priv, "led off\n");
+	return iwl3945_send_led_cmd(priv, &led_cmd);
+}
+
 const struct iwl_led_ops iwl3945_led_ops = {
 	.cmd = iwl3945_send_led_cmd,
+	.on = iwl3945_led_on,
+	.off = iwl3945_led_off,
 };
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c b/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c
index 1d9dcd7e3b82..a9b852be4509 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -594,11 +594,10 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
 
 	rx_status.flag = 0;
 	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+	rx_status.freq =
+		ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
 	rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
 				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-	rx_status.freq =
-		ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
-					       rx_status.band);
 
 	rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
 	if (rx_status.band == IEEE80211_BAND_5GHZ)
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c b/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c
index 313e92ed568b..3f1e5f1bf847 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2316,11 +2316,6 @@ static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
 	priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
 	/* Tx response */
 	priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
-
-	/* set up notification wait support */
-	spin_lock_init(&priv->_agn.notif_wait_lock);
-	INIT_LIST_HEAD(&priv->_agn.notif_waits);
-	init_waitqueue_head(&priv->_agn.notif_waitq);
 }
 
 static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
@@ -2629,7 +2624,6 @@ struct iwl_cfg iwl4965_agn_cfg = {
 	.fw_name_pre = IWL4965_FW_PRE,
 	.ucode_api_max = IWL4965_UCODE_API_MAX,
 	.ucode_api_min = IWL4965_UCODE_API_MIN,
-	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_ABC,
 	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c
index c195674454f4..af505bcd7ae0 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -67,13 +67,13 @@
 #define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
 #define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
 
-#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
-#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
-#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
+#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
+#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
 
-#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
-#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
-#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
+#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
+#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
 
 static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 {
@@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
 			CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
 }
 
-static void iwl6150_additional_nic_config(struct iwl_priv *priv)
+static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
 {
 	/* Indicate calibration version to uCode. */
 	if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -354,7 +354,7 @@ static struct iwl_lib_ops iwl6000_lib = {
 	}
 };
 
-static struct iwl_lib_ops iwl6030_lib = {
+static struct iwl_lib_ops iwl6000g2b_lib = {
 	.set_hw_params = iwl6000_hw_set_hw_params,
 	.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
 	.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -430,8 +430,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
 	.additional_nic_config = &iwl6050_additional_nic_config,
 };
 
-static struct iwl_nic_ops iwl6150_nic_ops = {
-	.additional_nic_config = &iwl6150_additional_nic_config,
+static struct iwl_nic_ops iwl6050g2_nic_ops = {
+	.additional_nic_config = &iwl6050g2_additional_nic_config,
 };
 
 static const struct iwl_ops iwl6000_ops = {
@@ -451,17 +451,17 @@ static const struct iwl_ops iwl6050_ops = {
 	.ieee80211_ops = &iwlagn_hw_ops,
 };
 
-static const struct iwl_ops iwl6150_ops = {
+static const struct iwl_ops iwl6050g2_ops = {
 	.lib = &iwl6000_lib,
 	.hcmd = &iwlagn_hcmd,
 	.utils = &iwlagn_hcmd_utils,
 	.led = &iwlagn_led_ops,
-	.nic = &iwl6150_nic_ops,
+	.nic = &iwl6050g2_nic_ops,
 	.ieee80211_ops = &iwlagn_hw_ops,
 };
 
-static const struct iwl_ops iwl6030_ops = {
-	.lib = &iwl6030_lib,
+static const struct iwl_ops iwl6000g2b_ops = {
+	.lib = &iwl6000g2b_lib,
 	.hcmd = &iwlagn_bt_hcmd,
 	.utils = &iwlagn_hcmd_utils,
 	.led = &iwlagn_led_ops,
@@ -555,11 +555,11 @@ static struct iwl_bt_params iwl6000_bt_params = {
 };
 
 #define IWL_DEVICE_6005 \
-	.fw_name_pre = IWL6005_FW_PRE, \
+	.fw_name_pre = IWL6000G2A_FW_PRE, \
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
-	.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
-	.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
 	.ops = &iwl6000_ops, \
 	.mod_params = &iwlagn_mod_params, \
 	.base_params = &iwl6000_g2_base_params, \
@@ -584,12 +584,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
 };
 
 #define IWL_DEVICE_6030 \
-	.fw_name_pre = IWL6030_FW_PRE, \
+	.fw_name_pre = IWL6000G2B_FW_PRE, \
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
-	.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
-	.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
-	.ops = &iwl6030_ops, \
+	.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
+	.eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
+	.ops = &iwl6000g2b_ops, \
 	.mod_params = &iwlagn_mod_params, \
 	.base_params = &iwl6000_g2_base_params, \
 	.bt_params = &iwl6000_bt_params, \
@@ -706,9 +706,9 @@ struct iwl_cfg iwl6150_bgn_cfg = {
 	.fw_name_pre = IWL6050_FW_PRE,
 	.ucode_api_max = IWL6050_UCODE_API_MAX,
 	.ucode_api_min = IWL6050_UCODE_API_MIN,
-	.eeprom_ver = EEPROM_6150_EEPROM_VERSION,
-	.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
-	.ops = &iwl6150_ops,
+	.eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
+	.ops = &iwl6050g2_ops,
 	.mod_params = &iwlagn_mod_params,
 	.base_params = &iwl6050_base_params,
 	.ht_params = &iwl6000_ht_params,
@@ -734,5 +734,5 @@ struct iwl_cfg iwl6000_3agn_cfg = {
 
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git
a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c index 27b5a3eec9dc..14ceb4df72f6 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c @@ -152,14 +152,11 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv) eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); - if (!priv->cfg->sku) { - /* not using sku overwrite */ - priv->cfg->sku = - ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> + priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> EEPROM_SKU_CAP_BAND_POS); - if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) - priv->cfg->sku |= IWL_SKU_N; - } + if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) + priv->cfg->sku |= IWL_SKU_N; + if (!priv->cfg->sku) { IWL_ERR(priv, "Invalid device sku\n"); return -EINVAL; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c index 41543ad4cb84..366340f3fb0f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c @@ -305,11 +305,7 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv) cmd.slots[0].type = 0; /* BSS */ cmd.slots[1].type = 1; /* PAN */ - if (priv->_agn.hw_roc_channel) { - /* both contexts must be used for this to happen */ - slot1 = priv->_agn.hw_roc_duration; - slot0 = IWL_MIN_SLOT_TIME; - } else if (ctx_bss->vif && ctx_pan->vif) { + if (ctx_bss->vif && ctx_pan->vif) { int bcnint = ctx_pan->vif->bss_conf.beacon_int; int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; @@ -334,12 +330,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv) if (test_bit(STATUS_SCAN_HW, &priv->status) || (!ctx_bss->vif->bss_conf.idle && !ctx_bss->vif->bss_conf.assoc)) { - slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; - slot1 = IWL_MIN_SLOT_TIME; + slot0 = dtim * bcnint * 3 - 20; + slot1 = 20; } else if (!ctx_pan->vif->bss_conf.idle && !ctx_pan->vif->bss_conf.assoc) { - slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME; - slot0 = IWL_MIN_SLOT_TIME; + slot1 = bcnint * 3 - 20; + slot0 = 20; } } else if (ctx_pan->vif) { slot0 = 0; @@ -348,8 +344,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv) slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); if (test_bit(STATUS_SCAN_HW, &priv->status)) { - slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; - slot1 = IWL_MIN_SLOT_TIME; + slot0 = slot1 * 3 - 20; + slot1 = 20; } } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.c index c1190d965614..1a24946bc203 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.c @@ -63,11 +63,23 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) } /* Set led register off */ -void iwlagn_led_enable(struct iwl_priv *priv) +static int iwl_led_on_reg(struct iwl_priv *priv) { + IWL_DEBUG_LED(priv, "led on\n"); iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); + return 0; +} + +/* Set led register off */ +static int iwl_led_off_reg(struct iwl_priv *priv) +{ + IWL_DEBUG_LED(priv, "LED Reg off\n"); + iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF); + return 0; } const struct iwl_led_ops iwlagn_led_ops = { .cmd = iwl_send_led_cmd, + .on = iwl_led_on_reg, + .off = iwl_led_off_reg, }; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.h index 96f323dc5dd6..a594e4fdc6b8 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.h +++ 
b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-led.h @@ -28,6 +28,5 @@ #define __iwl_agn_led_h__ extern const struct iwl_led_ops iwlagn_led_ops; -void iwlagn_led_enable(struct iwl_priv *priv); #endif /* __iwl_agn_led_h__ */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index c7d03874b380..3dee87e8f55d 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -473,11 +473,6 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv) priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = iwlagn_rx_calib_complete; priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; - - /* set up notification wait support */ - spin_lock_init(&priv->_agn.notif_wait_lock); - INIT_LIST_HEAD(&priv->_agn.notif_waits); - init_waitqueue_head(&priv->_agn.notif_waitq); } void iwlagn_setup_deferred_work(struct iwl_priv *priv) @@ -1162,11 +1157,10 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv, /* rx_status carries information about the packet to mac80211 */ rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), - rx_status.band); rx_status.rate_idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); rx_status.flag = 0; @@ -2395,44 +2389,3 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display) } return 0; } - -/* notification wait support */ -void iwlagn_init_notification_wait(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - void (*fn)(struct iwl_priv *priv, - struct iwl_rx_packet *pkt), - u8 cmd) -{ - wait_entry->fn = fn; - wait_entry->cmd = cmd; - wait_entry->triggered = false; - - spin_lock_bh(&priv->_agn.notif_wait_lock); - list_add(&wait_entry->list, &priv->_agn.notif_waits); - spin_unlock_bh(&priv->_agn.notif_wait_lock); -} - -signed long iwlagn_wait_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - unsigned long timeout) -{ - int ret; - - ret = wait_event_timeout(priv->_agn.notif_waitq, - &wait_entry->triggered, - timeout); - - spin_lock_bh(&priv->_agn.notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(&priv->_agn.notif_wait_lock); - - return ret; -} - -void iwlagn_remove_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry) -{ - spin_lock_bh(&priv->_agn.notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(&priv->_agn.notif_wait_lock); -} diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 2a4ff832fbb8..6d140bd53291 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -52,14 +52,10 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) { - struct iwl_notification_wait disable_wait; __le32 old_filter = send->filter_flags; u8 old_dev_type = send->dev_type; int ret; - iwlagn_init_notification_wait(priv, &disable_wait, NULL, - REPLY_WIPAN_DEACTIVATION_COMPLETE); - send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; send->dev_type = RXON_DEV_TYPE_P2P; ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send); @@ -67,18 +63,11 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, send->filter_flags = 
old_filter; send->dev_type = old_dev_type; - if (ret) { + if (ret) IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); - iwlagn_remove_notification(priv, &disable_wait); - } else { - signed long wait_res; - - wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ); - if (wait_res == 0) { - IWL_ERR(priv, "Timed out waiting for PAN disable\n"); - ret = -EIO; - } - } + + /* FIXME: WAIT FOR PAN DISABLE */ + msleep(300); return ret; } @@ -156,23 +145,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* always get timestamp with Rx frame */ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; - if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) { - struct ieee80211_channel *chan = priv->_agn.hw_roc_channel; - - iwl_set_rxon_channel(priv, chan, ctx); - iwl_set_flags_for_band(priv, ctx, chan->band, NULL); - ctx->staging.filter_flags |= - RXON_FILTER_ASSOC_MSK | - RXON_FILTER_PROMISC_MSK | - RXON_FILTER_CTL2HOST_MSK; - ctx->staging.dev_type = RXON_DEV_TYPE_P2P; - new_assoc = true; - - if (memcmp(&ctx->staging, &ctx->active, - sizeof(ctx->staging)) == 0) - return 0; - } - if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; @@ -574,10 +546,12 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { + iwl_led_associate(priv); priv->timestamp = bss_conf->timestamp; ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; } else { ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_led_disassociate(priv); } } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 266490d8a397..24a11b8f73bc 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -539,14 +539,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) unsigned long flags; bool is_agg = false; - /* - * If the frame needs to go out off-channel, then - * we'll have put the PAN context to that channel, - * so make the frame go out there. - */ - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) - ctx = &priv->contexts[IWL_RXON_CTX_PAN]; - else if (info->control.vif) + if (info->control.vif) ctx = iwl_rxon_ctx_from_vif(info->control.vif); spin_lock_irqsave(&priv->lock, flags); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c index eb16647cfbe0..36335b1b54d4 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -59,7 +59,6 @@ #include "iwl-sta.h" #include "iwl-agn-calib.h" #include "iwl-agn.h" -#include "iwl-agn-led.h" /****************************************************************************** @@ -847,7 +846,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv) * the appropriate handlers, including command responses, * frame-received notifications, and other notifications. */ -static void iwl_rx_handle(struct iwl_priv *priv) +void iwl_rx_handle(struct iwl_priv *priv) { struct iwl_rx_mem_buffer *rxb; struct iwl_rx_packet *pkt; @@ -911,27 +910,6 @@ static void iwl_rx_handle(struct iwl_priv *priv) (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && (pkt->hdr.cmd != REPLY_TX); - /* - * Do the notification wait before RX handlers so - * even if the RX handler consumes the RXB we have - * access to it in the notification wait entry. 
- */ - if (!list_empty(&priv->_agn.notif_waits)) { - struct iwl_notification_wait *w; - - spin_lock(&priv->_agn.notif_wait_lock); - list_for_each_entry(w, &priv->_agn.notif_waits, list) { - if (w->cmd == pkt->hdr.cmd) { - w->triggered = true; - if (w->fn) - w->fn(priv, pkt); - } - } - spin_unlock(&priv->_agn.notif_wait_lock); - - wake_up_all(&priv->_agn.notif_waitq); - } - /* Based on type of command response or notification, * handle those that need handling via function in * rx_handlers table. See iwl_setup_rx_handlers() */ @@ -2742,6 +2720,8 @@ static void iwl_alive_start(struct iwl_priv *priv) /* At this point, the NIC is initialized and operational */ iwl_rf_kill_ct_config(priv); + iwl_leds_init(priv); + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); wake_up_interruptible(&priv->wait_command_queue); @@ -3208,8 +3188,6 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; } - hw->wiphy->max_remain_on_channel_duration = 1000; - hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; @@ -3235,8 +3213,6 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->bands[IEEE80211_BAND_5GHZ]; - iwl_leds_init(priv); - ret = ieee80211_register_hw(priv->hw); if (ret) { IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); @@ -3281,7 +3257,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw) } } - iwlagn_led_enable(priv); + iwl_led_start(priv); out: priv->is_open = 1; @@ -3417,8 +3393,7 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct iwl_priv *priv = hw->priv; int ret = -EINVAL; @@ -3728,95 +3703,6 @@ void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) IWL_DEBUG_MAC80211(priv, "leave\n"); } -static void iwlagn_disable_roc(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; - struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel); - - lockdep_assert_held(&priv->mutex); - - if (!ctx->is_active) - return; - - ctx->staging.dev_type = RXON_DEV_TYPE_2STA; - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwl_set_rxon_channel(priv, chan, ctx); - iwl_set_flags_for_band(priv, ctx, chan->band, NULL); - - priv->_agn.hw_roc_channel = NULL; - - iwlagn_commit_rxon(priv, ctx); - - ctx->is_active = false; -} - -static void iwlagn_bg_roc_done(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, - _agn.hw_roc_work.work); - - mutex_lock(&priv->mutex); - ieee80211_remain_on_channel_expired(priv->hw); - iwlagn_disable_roc(priv); - mutex_unlock(&priv->mutex); -} - -static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw, - struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type, - int duration) -{ - struct iwl_priv *priv = hw->priv; - int err = 0; - - if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes & - BIT(NL80211_IFTYPE_P2P_CLIENT))) - return -EOPNOTSUPP; - - mutex_lock(&priv->mutex); - - if (priv->contexts[IWL_RXON_CTX_PAN].is_active || - test_bit(STATUS_SCAN_HW, &priv->status)) { - err = -EBUSY; - goto out; - } - - priv->contexts[IWL_RXON_CTX_PAN].is_active = true; - 
priv->_agn.hw_roc_channel = channel; - priv->_agn.hw_roc_chantype = channel_type; - priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024); - iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]); - queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work, - msecs_to_jiffies(duration + 20)); - - msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */ - ieee80211_ready_on_channel(priv->hw); - - out: - mutex_unlock(&priv->mutex); - - return err; -} - -static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - - if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - cancel_delayed_work_sync(&priv->_agn.hw_roc_work); - - mutex_lock(&priv->mutex); - iwlagn_disable_roc(priv); - mutex_unlock(&priv->mutex); - - return 0; -} - /***************************************************************************** * * driver setup and teardown @@ -3838,7 +3724,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); - INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done); iwl_setup_scan_deferred_work(priv); @@ -4007,8 +3892,6 @@ struct ieee80211_ops iwlagn_hw_ops = { .channel_switch = iwlagn_mac_channel_switch, .flush = iwlagn_mac_flush, .tx_last_beacon = iwl_mac_tx_last_beacon, - .remain_on_channel = iwl_mac_remain_on_channel, - .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel, }; #endif @@ -4136,10 +4019,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE; priv->contexts[IWL_RXON_CTX_PAN].interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); -#ifdef CONFIG_IWL_P2P - priv->contexts[IWL_RXON_CTX_PAN].interface_modes |= - BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); -#endif priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; @@ -4387,9 +4266,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) * we need to set STATUS_EXIT_PENDING bit. 
*/ set_bit(STATUS_EXIT_PENDING, &priv->status); - - iwl_leds_exit(priv); - if (priv->mac80211_registered) { ieee80211_unregister_hw(priv->hw); priv->mac80211_registered = 0; @@ -4610,49 +4486,6 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, -/* 2x00 Series */ - {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)}, - -/* 2x30 Series */ - {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)}, - -/* 6x35 Series */ - {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)}, - -/* 200 Series */ - {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)}, - {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)}, - -/* 230 Series */ - {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)}, - {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)}, - #endif /* CONFIG_IWL5000 */ {0} diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h index d00e1ea50a8d..da303585f801 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -96,17 +96,6 @@ extern struct iwl_cfg iwl100_bgn_cfg; extern struct iwl_cfg iwl100_bg_cfg; extern struct iwl_cfg iwl130_bgn_cfg; extern struct iwl_cfg iwl130_bg_cfg; -extern struct iwl_cfg iwl2000_2bgn_cfg; -extern struct iwl_cfg iwl2000_2bg_cfg; -extern struct iwl_cfg iwl2030_2bgn_cfg; -extern struct iwl_cfg iwl2030_2bg_cfg; -extern struct iwl_cfg iwl6035_2agn_cfg; -extern struct iwl_cfg iwl6035_2abg_cfg; -extern struct iwl_cfg iwl6035_2bg_cfg; -extern struct iwl_cfg iwl200_bg_cfg; -extern struct iwl_cfg iwl200_bgn_cfg; -extern struct iwl_cfg iwl230_bg_cfg; -extern struct iwl_cfg iwl230_bgn_cfg; extern struct iwl_mod_params iwlagn_mod_params; extern struct iwl_hcmd_ops iwlagn_hcmd; @@ -196,6 +185,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); +void iwl_rx_handle(struct iwl_priv *priv); /* tx */ void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); @@ -340,21 +330,6 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 
*mac); int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); -/* notification wait support */ -void __acquires(wait_entry) -iwlagn_init_notification_wait(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - void (*fn)(struct iwl_priv *priv, - struct iwl_rx_packet *pkt), - u8 cmd); -signed long __releases(wait_entry) -iwlagn_wait_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry, - unsigned long timeout); -void __releases(wait_entry) -iwlagn_remove_notification(struct iwl_priv *priv, - struct iwl_notification_wait *wait_entry); - /* mac80211 handlers (for 4965) */ int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); int iwlagn_mac_start(struct ieee80211_hw *hw); @@ -374,8 +349,7 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size); + struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwlagn_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h b/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h index 935b19e2c260..f893d4a6aa87 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -189,7 +189,6 @@ enum { REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */ REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9, REPLY_WIPAN_NOA_NOTIFICATION = 0xbc, - REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd, REPLY_MAX = 0xff }; @@ -4370,11 +4369,6 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification) */ -/* - * Minimum slot time in TU - */ -#define IWL_MIN_SLOT_TIME 20 - /** * struct iwl_wipan_slot * @width: Time in TU diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c index a46ad60216a0..efbde1f1a8bf 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c @@ -227,8 +227,7 @@ int iwlcore_init_geos(struct iwl_priv *priv) geo_ch = &sband->channels[sband->n_channels++]; geo_ch->center_freq = - ieee80211_channel_to_frequency(ch->channel, - sband->band); + ieee80211_channel_to_frequency(ch->channel); geo_ch->max_power = ch->max_power_avg; geo_ch->max_antenna_gain = 0xff; geo_ch->hw_value = ch->channel; @@ -1404,10 +1403,9 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *tmp, *ctx = NULL; int err; - enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", - viftype, vif->addr); + vif->type, vif->addr); mutex_lock(&priv->mutex); @@ -1431,7 +1429,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) continue; } - if (!(possible_modes & BIT(viftype))) + if (!(possible_modes & BIT(vif->type))) continue; /* have maybe usable context w/o interface */ @@ -1677,6 +1675,7 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv) { memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); + priv->led_tpt = 0; } /* @@ -1769,6 +1768,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) stats->data_cnt++; 
stats->data_bytes += len; } + iwl_leds_background(priv); } EXPORT_SYMBOL(iwl_update_stats); #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-core.h b/trunk/drivers/net/wireless/iwlwifi/iwl-core.h index bbc5aa7a7f2f..a3474376fdbc 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-core.h @@ -227,6 +227,8 @@ struct iwl_lib_ops { struct iwl_led_ops { int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); + int (*on)(struct iwl_priv *priv); + int (*off)(struct iwl_priv *priv); }; /* NIC specific ops */ @@ -492,6 +494,18 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) { + struct traffic_stats *stats; + + if (is_tx) + stats = &priv->tx_stats; + else + stats = &priv->rx_stats; + + if (ieee80211_is_data(fc)) { + /* data */ + stats->data_bytes += len; + } + iwl_leds_background(priv); } #endif /***************************************************** diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h b/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h index 6c2b2df7ee7e..b80bf7dff55b 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -290,7 +290,7 @@ /* HW REV */ -#define CSR_HW_REV_TYPE_MSK (0x00001F0) +#define CSR_HW_REV_TYPE_MSK (0x00000F0) #define CSR_HW_REV_TYPE_3945 (0x00000D0) #define CSR_HW_REV_TYPE_4965 (0x0000000) #define CSR_HW_REV_TYPE_5300 (0x0000020) @@ -300,15 +300,9 @@ #define CSR_HW_REV_TYPE_1000 (0x0000060) #define CSR_HW_REV_TYPE_6x00 (0x0000070) #define CSR_HW_REV_TYPE_6x50 (0x0000080) -#define CSR_HW_REV_TYPE_6150 (0x0000084) -#define CSR_HW_REV_TYPE_6x05 (0x00000B0) -#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 -#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 -#define CSR_HW_REV_TYPE_2x30 (0x00000C0) -#define CSR_HW_REV_TYPE_2x00 (0x0000100) -#define CSR_HW_REV_TYPE_200 (0x0000110) -#define CSR_HW_REV_TYPE_230 (0x0000120) -#define CSR_HW_REV_TYPE_NONE (0x00001F0) +#define CSR_HW_REV_TYPE_6x50g2 (0x0000084) +#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0) +#define CSR_HW_REV_TYPE_NONE (0x00000F0) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 418c8ac26222..6fe80b5e7a15 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -207,19 +207,18 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file, return ret; } +#define BYTE1_MASK 0x000000ff; +#define BYTE2_MASK 0x0000ffff; +#define BYTE3_MASK 0x00ffffff; static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - u32 val = 0; + u32 val; char *buf; ssize_t ret; - int i = 0; - bool device_format = false; - int offset = 0; - int len = 0; + int i; int pos = 0; - int sram; struct iwl_priv *priv = file->private_data; size_t bufsz; @@ -231,62 +230,35 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, else priv->dbgfs_sram_len = priv->ucode_data.len; } - len = priv->dbgfs_sram_len; - - if (len == -4) { - device_format = true; - len = 4; - } - - bufsz = 50 + len * 4; + bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; - pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", - len); + priv->dbgfs_sram_len); pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 
0x%x\n", priv->dbgfs_sram_offset); - - /* adjust sram address since reads are only on even u32 boundaries */ - offset = priv->dbgfs_sram_offset & 0x3; - sram = priv->dbgfs_sram_offset & ~0x3; - - /* read the first u32 from sram */ - val = iwl_read_targ_mem(priv, sram); - - for (; len; len--) { - /* put the address at the start of every line */ - if (i == 0) - pos += scnprintf(buf + pos, bufsz - pos, - "%08X: ", sram + offset); - - if (device_format) - pos += scnprintf(buf + pos, bufsz - pos, - "%02x", (val >> (8 * (3 - offset))) & 0xff); - else - pos += scnprintf(buf + pos, bufsz - pos, - "%02x ", (val >> (8 * offset)) & 0xff); - - /* if all bytes processed, read the next u32 from sram */ - if (++offset == 4) { - sram += 4; - offset = 0; - val = iwl_read_targ_mem(priv, sram); + for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { + val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \ + priv->dbgfs_sram_len - i); + if (i < 4) { + switch (i) { + case 1: + val &= BYTE1_MASK; + break; + case 2: + val &= BYTE2_MASK; + break; + case 3: + val &= BYTE3_MASK; + break; + } } - - /* put in extra spaces and split lines for human readability */ - if (++i == 16) { - i = 0; + if (!(i % 16)) pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } else if (!(i & 7)) { - pos += scnprintf(buf + pos, bufsz - pos, " "); - } else if (!(i & 3)) { - pos += scnprintf(buf + pos, bufsz - pos, " "); - } + pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); } - if (i) - pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); @@ -310,9 +282,6 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file, if (sscanf(buf, "%x,%x", &offset, &len) == 2) { priv->dbgfs_sram_offset = offset; priv->dbgfs_sram_len = len; - } else if (sscanf(buf, "%x", &offset) == 1) { - priv->dbgfs_sram_offset = offset; - priv->dbgfs_sram_len = -4; } else { priv->dbgfs_sram_offset = 0; priv->dbgfs_sram_len = 0; @@ -699,6 +668,29 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } +static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[256]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "allow blinking: %s\n", + (priv->allow_blinking) ? 
"True" : "False"); + if (priv->allow_blinking) { + pos += scnprintf(buf + pos, bufsz - pos, + "Led blinking rate: %u\n", + priv->last_blink_rate); + pos += scnprintf(buf + pos, bufsz - pos, + "Last blink time: %lu\n", + priv->last_blink_time); + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -864,6 +856,7 @@ DEBUGFS_READ_FILE_OPS(channels); DEBUGFS_READ_FILE_OPS(status); DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_FILE_OPS(led); DEBUGFS_READ_FILE_OPS(thermal_throttling); DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); @@ -1732,6 +1725,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR); if (!priv->cfg->base_params->broken_powersave) { DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h b/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h index 6fa1383d72ec..8dda67850af4 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -34,8 +34,6 @@ #include /* for struct pci_device_id */ #include -#include -#include #include #include "iwl-eeprom.h" @@ -997,6 +995,7 @@ struct reply_agg_tx_error_statistics { u32 unknown; }; +#ifdef CONFIG_IWLWIFI_DEBUGFS /* management statistics */ enum iwl_mgmt_stats { MANAGEMENT_ASSOC_REQ = 0, @@ -1027,13 +1026,16 @@ enum iwl_ctrl_stats { }; struct traffic_stats { -#ifdef CONFIG_IWLWIFI_DEBUGFS u32 mgmt[MANAGEMENT_MAX]; u32 ctrl[CONTROL_MAX]; u32 data_cnt; u64 data_bytes; -#endif }; +#else +struct traffic_stats { + u64 data_bytes; +}; +#endif /* * iwl_switch_rxon: "channel switch" structure @@ -1137,33 +1139,6 @@ struct iwl_force_reset { */ #define IWLAGN_EXT_BEACON_TIME_POS 22 -/** - * struct iwl_notification_wait - notification wait entry - * @list: list head for global list - * @fn: function called with the notification - * @cmd: command ID - * - * This structure is not used directly, to wait for a - * notification declare it on the stack, and call - * iwlagn_init_notification_wait() with appropriate - * parameters. Then do whatever will cause the ucode - * to notify the driver, and to wait for that then - * call iwlagn_wait_notification(). - * - * Each notification is one-shot. If at some point we - * need to support multi-shot notifications (which - * can't be allocated on the stack) we need to modify - * the code for them. 
- */ -struct iwl_notification_wait { - struct list_head list; - - void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt); - - u8 cmd; - bool triggered; -}; - enum iwl_rxon_context_id { IWL_RXON_CTX_BSS, IWL_RXON_CTX_PAN, @@ -1335,6 +1310,11 @@ struct iwl_priv { struct iwl_init_alive_resp card_alive_init; struct iwl_alive_resp card_alive; + unsigned long last_blink_time; + u8 last_blink_rate; + u8 allow_blinking; + u64 led_tpt; + u16 active_rate; u8 start_calib; @@ -1483,17 +1463,6 @@ struct iwl_priv { struct iwl_bt_notif_statistics delta_statistics_bt; struct iwl_bt_notif_statistics max_delta_bt; #endif - - /* notification wait support */ - struct list_head notif_waits; - spinlock_t notif_wait_lock; - wait_queue_head_t notif_waitq; - - /* remain-on-channel offload support */ - struct ieee80211_channel *hw_roc_channel; - struct delayed_work hw_roc_work; - enum nl80211_channel_type hw_roc_chantype; - int hw_roc_duration; } _agn; #endif }; @@ -1578,10 +1547,6 @@ struct iwl_priv { bool hw_ready; struct iwl_event_log event_log; - - struct led_classdev led; - unsigned long blink_on, blink_off; - bool led_registered; }; /*iwl_priv */ static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 98aa8af01192..9e6f31355eee 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -247,26 +247,13 @@ struct iwl_eeprom_enhanced_txpwr { #define EEPROM_6050_TX_POWER_VERSION (4) #define EEPROM_6050_EEPROM_VERSION (0x532) -/* 6150 Specific */ -#define EEPROM_6150_TX_POWER_VERSION (6) -#define EEPROM_6150_EEPROM_VERSION (0x553) - -/* 6x05 Specific */ -#define EEPROM_6005_TX_POWER_VERSION (6) -#define EEPROM_6005_EEPROM_VERSION (0x709) - -/* 6x30 Specific */ -#define EEPROM_6030_TX_POWER_VERSION (6) -#define EEPROM_6030_EEPROM_VERSION (0x709) - -/* 2x00 Specific */ -#define EEPROM_2000_TX_POWER_VERSION (6) -#define EEPROM_2000_EEPROM_VERSION (0x805) - -/* 6x35 Specific */ -#define EEPROM_6035_TX_POWER_VERSION (6) -#define EEPROM_6035_EEPROM_VERSION (0x753) +/* 6x50g2 Specific */ +#define EEPROM_6050G2_TX_POWER_VERSION (6) +#define EEPROM_6050G2_EEPROM_VERSION (0x553) +/* 6x00g2 Specific */ +#define EEPROM_6000G2_TX_POWER_VERSION (6) +#define EEPROM_6000G2_EEPROM_VERSION (0x709) /* OTP */ /* lower blocks contain EEPROM image and calibration data */ @@ -277,7 +264,6 @@ struct iwl_eeprom_enhanced_txpwr { #define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ #define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ #define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ -#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */ /* 2.4 GHz */ extern const u8 iwl_eeprom_band_1[14]; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c index e4b953d7b7bf..c373b53babea 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c @@ -108,7 +108,6 @@ const char *get_cmd_string(u8 cmd) IWL_CMD(REPLY_WIPAN_WEPKEY); IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); - IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE); default: return "UNKNOWN"; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-led.c b/trunk/drivers/net/wireless/iwlwifi/iwl-led.c index 074ad2275228..46ccdf406e8e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-led.c @@ 
-48,19 +48,31 @@ module_param(led_mode, int, S_IRUGO); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); -static const struct ieee80211_tpt_blink iwl_blink[] = { - { .throughput = 0 * 1024 - 1, .blink_time = 334 }, - { .throughput = 1 * 1024 - 1, .blink_time = 260 }, - { .throughput = 5 * 1024 - 1, .blink_time = 220 }, - { .throughput = 10 * 1024 - 1, .blink_time = 190 }, - { .throughput = 20 * 1024 - 1, .blink_time = 170 }, - { .throughput = 50 * 1024 - 1, .blink_time = 150 }, - { .throughput = 70 * 1024 - 1, .blink_time = 130 }, - { .throughput = 100 * 1024 - 1, .blink_time = 110 }, - { .throughput = 200 * 1024 - 1, .blink_time = 80 }, - { .throughput = 300 * 1024 - 1, .blink_time = 50 }, +static const struct { + u16 tpt; /* Mb/s */ + u8 on_time; + u8 off_time; +} blink_tbl[] = +{ + {300, 25, 25}, + {200, 40, 40}, + {100, 55, 55}, + {70, 65, 65}, + {50, 75, 75}, + {20, 85, 85}, + {10, 95, 95}, + {5, 110, 110}, + {1, 130, 130}, + {0, 167, 167}, + /* SOLID_ON */ + {-1, IWL_LED_SOLID, 0} }; +#define IWL_1MB_RATE (128 * 1024) +#define IWL_LED_THRESHOLD (16) +#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */ +#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1) + /* * Adjust led blink rate to compensate on a MAC Clock difference on every HW * Led blink rate analysis showed an average deviation of 0% on 3945, @@ -85,104 +97,133 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv, } /* Set led pattern command */ -static int iwl_led_cmd(struct iwl_priv *priv, - unsigned long on, - unsigned long off) +static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx) { struct iwl_led_cmd led_cmd = { .id = IWL_LED_LINK, .interval = IWL_DEF_LED_INTRVL }; - int ret; - if (!test_bit(STATUS_READY, &priv->status)) - return -EBUSY; + BUG_ON(idx > IWL_MAX_BLINK_TBL); - if (priv->blink_on == on && priv->blink_off == off) - return 0; - - IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", + IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n", priv->cfg->base_params->led_compensation); - led_cmd.on = iwl_blink_compensation(priv, on, + led_cmd.on = + iwl_blink_compensation(priv, blink_tbl[idx].on_time, priv->cfg->base_params->led_compensation); - led_cmd.off = iwl_blink_compensation(priv, off, + led_cmd.off = + iwl_blink_compensation(priv, blink_tbl[idx].off_time, priv->cfg->base_params->led_compensation); - ret = priv->cfg->ops->led->cmd(priv, &led_cmd); - if (!ret) { - priv->blink_on = on; - priv->blink_off = off; - } - return ret; + return priv->cfg->ops->led->cmd(priv, &led_cmd); } -static void iwl_led_brightness_set(struct led_classdev *led_cdev, - enum led_brightness brightness) +int iwl_led_start(struct iwl_priv *priv) { - struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); - unsigned long on = 0; + return priv->cfg->ops->led->on(priv); +} +EXPORT_SYMBOL(iwl_led_start); - if (brightness > 0) - on = IWL_LED_SOLID; +int iwl_led_associate(struct iwl_priv *priv) +{ + IWL_DEBUG_LED(priv, "Associated\n"); + if (priv->cfg->led_mode == IWL_LED_BLINK) + priv->allow_blinking = 1; + priv->last_blink_time = jiffies; - iwl_led_cmd(priv, on, 0); + return 0; } +EXPORT_SYMBOL(iwl_led_associate); -static int iwl_led_blink_set(struct led_classdev *led_cdev, - unsigned long *delay_on, - unsigned long *delay_off) +int iwl_led_disassociate(struct iwl_priv *priv) { - struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + priv->allow_blinking = 0; - return iwl_led_cmd(priv, *delay_on, *delay_off); + 
return 0; } +EXPORT_SYMBOL(iwl_led_disassociate); -void iwl_leds_init(struct iwl_priv *priv) +/* + * calculate blink rate according to last second Tx/Rx activities + */ +static int iwl_get_blink_rate(struct iwl_priv *priv) +{ + int i; + /* count both tx and rx traffic to be able to + * handle traffic in either direction + */ + u64 current_tpt = priv->tx_stats.data_bytes + + priv->rx_stats.data_bytes; + s64 tpt = current_tpt - priv->led_tpt; + + if (tpt < 0) /* wraparound */ + tpt = -tpt; + + IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n", + (long long)tpt, + (unsigned long long)current_tpt); + priv->led_tpt = current_tpt; + + if (!priv->allow_blinking) + i = IWL_MAX_BLINK_TBL; + else + for (i = 0; i < IWL_MAX_BLINK_TBL; i++) + if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) + break; + + IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i); + return i; +} + +/* + * this function called from handler. Since setting Led command can + * happen very frequent we postpone led command to be called from + * REPLY handler so we know ucode is up + */ +void iwl_leds_background(struct iwl_priv *priv) { - int mode = led_mode; - int ret; - - if (mode == IWL_LED_DEFAULT) - mode = priv->cfg->led_mode; - - priv->led.name = kasprintf(GFP_KERNEL, "%s-led", - wiphy_name(priv->hw->wiphy)); - priv->led.brightness_set = iwl_led_brightness_set; - priv->led.blink_set = iwl_led_blink_set; - priv->led.max_brightness = 1; - - switch (mode) { - case IWL_LED_DEFAULT: - WARN_ON(1); - break; - case IWL_LED_BLINK: - priv->led.default_trigger = - ieee80211_create_tpt_led_trigger(priv->hw, - IEEE80211_TPT_LEDTRIG_FL_CONNECTED, - iwl_blink, ARRAY_SIZE(iwl_blink)); - break; - case IWL_LED_RF_STATE: - priv->led.default_trigger = - ieee80211_get_radio_led_name(priv->hw); - break; + u8 blink_idx; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + priv->last_blink_time = 0; + return; + } + if (iwl_is_rfkill(priv)) { + priv->last_blink_time = 0; + return; } - ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); - if (ret) { - kfree(priv->led.name); + if (!priv->allow_blinking) { + priv->last_blink_time = 0; + if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) { + priv->last_blink_rate = IWL_SOLID_BLINK_IDX; + iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX); + } return; } + if (!priv->last_blink_time || + !time_after(jiffies, priv->last_blink_time + + msecs_to_jiffies(1000))) + return; + + blink_idx = iwl_get_blink_rate(priv); - priv->led_registered = true; + /* call only if blink rate change */ + if (blink_idx != priv->last_blink_rate) + iwl_led_pattern(priv, blink_idx); + + priv->last_blink_time = jiffies; + priv->last_blink_rate = blink_idx; } -EXPORT_SYMBOL(iwl_leds_init); +EXPORT_SYMBOL(iwl_leds_background); -void iwl_leds_exit(struct iwl_priv *priv) +void iwl_leds_init(struct iwl_priv *priv) { - if (!priv->led_registered) - return; - - led_classdev_unregister(&priv->led); - kfree(priv->led.name); + priv->last_blink_rate = 0; + priv->last_blink_time = 0; + priv->allow_blinking = 0; + if (led_mode != IWL_LED_DEFAULT && + led_mode != priv->cfg->led_mode) + priv->cfg->led_mode = led_mode; } -EXPORT_SYMBOL(iwl_leds_exit); +EXPORT_SYMBOL(iwl_leds_init); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-led.h b/trunk/drivers/net/wireless/iwlwifi/iwl-led.h index 101eef12b3bb..9079b33486ef 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-led.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-led.h @@ -31,14 +31,23 @@ struct iwl_priv; #define IWL_LED_SOLID 11 +#define IWL_LED_NAME_LEN 31 #define IWL_DEF_LED_INTRVL cpu_to_le32(1000) 
#define IWL_LED_ACTIVITY (0<<1) #define IWL_LED_LINK (1<<1) +enum led_type { + IWL_LED_TRG_TX, + IWL_LED_TRG_RX, + IWL_LED_TRG_ASSOC, + IWL_LED_TRG_RADIO, + IWL_LED_TRG_MAX, +}; + /* * LED mode - * IWL_LED_DEFAULT: use device default + * IWL_LED_DEFAULT: use system default * IWL_LED_RF_STATE: turn LED on/off based on RF state * LED ON = RF ON * LED OFF = RF OFF @@ -51,6 +60,9 @@ enum iwl_led_mode { }; void iwl_leds_init(struct iwl_priv *priv); -void iwl_leds_exit(struct iwl_priv *priv); +void iwl_leds_background(struct iwl_priv *priv); +int iwl_led_start(struct iwl_priv *priv); +int iwl_led_associate(struct iwl_priv *priv); +int iwl_led_disassociate(struct iwl_priv *priv); #endif /* __iwl_leds_h__ */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-legacy.c b/trunk/drivers/net/wireless/iwlwifi/iwl-legacy.c index 927fe37a43ab..bb1a742a98a0 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-legacy.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-legacy.c @@ -332,6 +332,7 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv, { struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); + iwl_led_disassociate(priv); /* * inform the ucode that there is no longer an * association and that no more packets should be @@ -519,6 +520,8 @@ void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, if (bss_conf->assoc) { priv->timestamp = bss_conf->timestamp; + iwl_led_associate(priv); + if (!iwl_is_rfkill(priv)) priv->cfg->ops->legacy->post_associate(priv); } else @@ -542,6 +545,7 @@ void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + iwl_led_associate(priv); priv->cfg->ops->legacy->config_ap(priv); } else iwl_set_no_assoc(priv, vif); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c b/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c index 9c986f272c2d..371abbf60eac 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -2540,6 +2540,8 @@ static void iwl3945_alive_start(struct iwl_priv *priv) iwl3945_reg_txpower_periodic(priv); + iwl_leds_init(priv); + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); set_bit(STATUS_READY, &priv->status); wake_up_interruptible(&priv->wait_command_queue); @@ -3168,6 +3170,8 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) * no need to poll the killswitch state anymore */ cancel_delayed_work(&priv->_3945.rfkill_poll); + iwl_led_start(priv); + priv->is_open = 1; IWL_DEBUG_MAC80211(priv, "leave\n"); return 0; @@ -3931,8 +3935,6 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->bands[IEEE80211_BAND_5GHZ]; - iwl_leds_init(priv); - ret = ieee80211_register_hw(priv->hw); if (ret) { IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); @@ -4192,8 +4194,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) set_bit(STATUS_EXIT_PENDING, &priv->status); - iwl_leds_exit(priv); - if (priv->mac80211_registered) { ieee80211_unregister_hw(priv->hw); priv->mac80211_registered = 0; diff --git a/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c index ed57e4402800..5a4982271e96 100644 --- a/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c +++ b/trunk/drivers/net/wireless/iwmc3200wifi/cfg80211.c @@ -287,8 +287,7 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm) return -EINVAL; } - freq = ieee80211_channel_to_frequency(umac_bss->channel, 
- band->band); + freq = ieee80211_channel_to_frequency(umac_bss->channel); channel = ieee80211_get_channel(wiphy, freq); signal = umac_bss->rssi * 100; diff --git a/trunk/drivers/net/wireless/iwmc3200wifi/rx.c b/trunk/drivers/net/wireless/iwmc3200wifi/rx.c index 9a57cf6a488f..a944893ae3ca 100644 --- a/trunk/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/trunk/drivers/net/wireless/iwmc3200wifi/rx.c @@ -543,10 +543,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, switch (le32_to_cpu(complete->status)) { case UMAC_ASSOC_COMPLETE_SUCCESS: chan = ieee80211_get_channel(wiphy, - ieee80211_channel_to_frequency(complete->channel, - complete->band == UMAC_BAND_2GHZ ? - IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ)); + ieee80211_channel_to_frequency(complete->channel)); if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) { /* Associated to a unallowed channel, disassociate. */ __iwm_invalidate_mlme_profile(iwm); @@ -844,7 +841,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, goto err; } - freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band); + freq = ieee80211_channel_to_frequency(umac_bss->channel); channel = ieee80211_get_channel(wiphy, freq); signal = umac_bss->rssi * 100; diff --git a/trunk/drivers/net/wireless/libertas/cfg.c b/trunk/drivers/net/wireless/libertas/cfg.c index 30ef0351bfc4..698a1f7694ed 100644 --- a/trunk/drivers/net/wireless/libertas/cfg.c +++ b/trunk/drivers/net/wireless/libertas/cfg.c @@ -607,8 +607,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, /* No channel, no luck */ if (chan_no != -1) { struct wiphy *wiphy = priv->wdev->wiphy; - int freq = ieee80211_channel_to_frequency(chan_no, - IEEE80211_BAND_2GHZ); + int freq = ieee80211_channel_to_frequency(chan_no); struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); @@ -1598,8 +1597,7 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev, lbs_deb_enter(LBS_DEB_CFG80211); survey->channel = ieee80211_get_channel(wiphy, - ieee80211_channel_to_frequency(priv->channel, - IEEE80211_BAND_2GHZ)); + ieee80211_channel_to_frequency(priv->channel)); ret = lbs_get_rssi(priv, &signal, &noise); if (ret == 0) { diff --git a/trunk/drivers/net/wireless/mac80211_hwsim.c b/trunk/drivers/net/wireless/mac80211_hwsim.c index 5d39b2840584..454f045ddff3 100644 --- a/trunk/drivers/net/wireless/mac80211_hwsim.c +++ b/trunk/drivers/net/wireless/mac80211_hwsim.c @@ -943,8 +943,7 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u16 *ssn) { switch (action) { case IEEE80211_AMPDU_TX_START: diff --git a/trunk/drivers/net/wireless/mwl8k.c b/trunk/drivers/net/wireless/mwl8k.c index af4f2c64f242..9ecf8407cb1b 100644 --- a/trunk/drivers/net/wireless/mwl8k.c +++ b/trunk/drivers/net/wireless/mwl8k.c @@ -232,9 +232,6 @@ struct mwl8k_priv { struct completion firmware_loading_complete; }; -#define MAX_WEP_KEY_LEN 13 -#define NUM_WEP_KEYS 4 - /* Per interface specific private data */ struct mwl8k_vif { struct list_head list; @@ -245,21 +242,8 @@ struct mwl8k_vif { /* Non AMPDU sequence number assigned by driver. 
*/ u16 seqno; - - /* Saved WEP keys */ - struct { - u8 enabled; - u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN]; - } wep_key_conf[NUM_WEP_KEYS]; - - /* BSSID */ - u8 bssid[ETH_ALEN]; - - /* A flag to indicate is HW crypto is enabled for this bssid */ - bool is_hw_crypto_enabled; }; #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) -#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8)) struct mwl8k_sta { /* Index into station database. Returned by UPDATE_STADB. */ @@ -353,7 +337,6 @@ static const struct ieee80211_rate mwl8k_rates_50[] = { #define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 #define MWL8K_CMD_BSS_START 0x1100 /* per-vif */ #define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ -#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */ #define MWL8K_CMD_UPDATE_STADB 0x1123 static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) @@ -392,7 +375,6 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) MWL8K_CMDNAME(SET_RATEADAPT_MODE); MWL8K_CMDNAME(BSS_START); MWL8K_CMDNAME(SET_NEW_STN); - MWL8K_CMDNAME(UPDATE_ENCRYPTION); MWL8K_CMDNAME(UPDATE_STADB); default: snprintf(buf, bufsize, "0x%x", cmd); @@ -733,12 +715,10 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) skb_pull(skb, sizeof(*tr) - hdrlen); } -static void -mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad) +static inline void mwl8k_add_dma_header(struct sk_buff *skb) { struct ieee80211_hdr *wh; int hdrlen; - int reqd_hdrlen; struct mwl8k_dma_data *tr; /* @@ -750,13 +730,11 @@ mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad) wh = (struct ieee80211_hdr *)skb->data; hdrlen = ieee80211_hdrlen(wh->frame_control); - reqd_hdrlen = sizeof(*tr); - - if (hdrlen != reqd_hdrlen) - skb_push(skb, reqd_hdrlen - hdrlen); + if (hdrlen != sizeof(*tr)) + skb_push(skb, sizeof(*tr) - hdrlen); if (ieee80211_is_data_qos(wh->frame_control)) - hdrlen -= IEEE80211_QOS_CTL_LEN; + hdrlen -= 2; tr = (struct mwl8k_dma_data *)skb->data; if (wh != &tr->wh) @@ -769,52 +747,9 @@ mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad) * payload". That is, everything except for the 802.11 header. * This includes all crypto material including the MIC. */ - tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad); + tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr)); } -static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb) -{ - struct ieee80211_hdr *wh; - struct ieee80211_tx_info *tx_info; - struct ieee80211_key_conf *key_conf; - int data_pad; - - wh = (struct ieee80211_hdr *)skb->data; - - tx_info = IEEE80211_SKB_CB(skb); - - key_conf = NULL; - if (ieee80211_is_data(wh->frame_control)) - key_conf = tx_info->control.hw_key; - - /* - * Make sure the packet header is in the DMA header format (4-address - * without QoS), the necessary crypto padding between the header and the - * payload has already been provided by mac80211, but it doesn't add tail - * padding when HW crypto is enabled. - * - * We have the following trailer padding requirements: - * - WEP: 4 trailer bytes (ICV) - * - TKIP: 12 trailer bytes (8 MIC + 4 ICV) - * - CCMP: 8 trailer bytes (MIC) - */ - data_pad = 0; - if (key_conf != NULL) { - switch (key_conf->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - data_pad = 4; - break; - case WLAN_CIPHER_SUITE_TKIP: - data_pad = 12; - break; - case WLAN_CIPHER_SUITE_CCMP: - data_pad = 8; - break; - } - } - mwl8k_add_dma_header(skb, data_pad); -} /* * Packet reception for 88w8366 AP firmware. 
@@ -843,13 +778,6 @@ struct mwl8k_rxd_8366_ap { #define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 -/* 8366 AP rx_status bits */ -#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80 -#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF -#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02 -#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04 -#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08 - static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) { struct mwl8k_rxd_8366_ap *rxd = _rxd; @@ -906,16 +834,10 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, } else { status->band = IEEE80211_BAND_2GHZ; } - status->freq = ieee80211_channel_to_frequency(rxd->channel, - status->band); + status->freq = ieee80211_channel_to_frequency(rxd->channel); *qos = rxd->qos_control; - if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) && - (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) && - (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR)) - status->flag |= RX_FLAG_MMIC_ERROR; - return le16_to_cpu(rxd->pkt_len); } @@ -954,11 +876,6 @@ struct mwl8k_rxd_sta { #define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001 #define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02 -#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04 -/* ICV=0 or MIC=1 */ -#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08 -/* Key is uploaded only in failure case */ -#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30 static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr) { @@ -1014,13 +931,9 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, } else { status->band = IEEE80211_BAND_2GHZ; } - status->freq = ieee80211_channel_to_frequency(rxd->channel, - status->band); + status->freq = ieee80211_channel_to_frequency(rxd->channel); *qos = rxd->qos_control; - if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) && - (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE)) - status->flag |= RX_FLAG_MMIC_ERROR; return le16_to_cpu(rxd->pkt_len); } @@ -1179,25 +1092,9 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, ieee80211_queue_work(hw, &priv->finalize_join_worker); } -static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list, - u8 *bssid) -{ - struct mwl8k_vif *mwl8k_vif; - - list_for_each_entry(mwl8k_vif, - vif_list, list) { - if (memcmp(bssid, mwl8k_vif->bssid, - ETH_ALEN) == 0) - return mwl8k_vif; - } - - return NULL; -} - static int rxq_process(struct ieee80211_hw *hw, int index, int limit) { struct mwl8k_priv *priv = hw->priv; - struct mwl8k_vif *mwl8k_vif = NULL; struct mwl8k_rx_queue *rxq = priv->rxq + index; int processed; @@ -1207,7 +1104,6 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) void *rxd; int pkt_len; struct ieee80211_rx_status status; - struct ieee80211_hdr *wh; __le16 qos; skb = rxq->buf[rxq->head].skb; @@ -1234,7 +1130,8 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) rxq->rxd_count--; - wh = &((struct mwl8k_dma_data *)skb->data)->wh; + skb_put(skb, pkt_len); + mwl8k_remove_dma_header(skb, qos); /* * Check for a pending join operation. Save a @@ -1244,46 +1141,6 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) if (mwl8k_capture_bssid(priv, (void *)skb->data)) mwl8k_save_beacon(hw, skb); - if (ieee80211_has_protected(wh->frame_control)) { - - /* Check if hw crypto has been enabled for - * this bss. 
If yes, set the status flags - * accordingly - */ - mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list, - wh->addr1); - - if (mwl8k_vif != NULL && - mwl8k_vif->is_hw_crypto_enabled == true) { - /* - * When MMIC ERROR is encountered - * by the firmware, payload is - * dropped and only 32 bytes of - * mwl8k Firmware header is sent - * to the host. - * - * We need to add four bytes of - * key information. In it - * MAC80211 expects keyidx set to - * 0 for triggering Counter - * Measure of MMIC failure. - */ - if (status.flag & RX_FLAG_MMIC_ERROR) { - struct mwl8k_dma_data *tr; - tr = (struct mwl8k_dma_data *)skb->data; - memset((void *)&(tr->data), 0, 4); - pkt_len += 4; - } - - if (!ieee80211_is_auth(wh->frame_control)) - status.flag |= RX_FLAG_IV_STRIPPED | - RX_FLAG_DECRYPTED | - RX_FLAG_MMIC_STRIPPED; - } - } - - skb_put(skb, pkt_len); - mwl8k_remove_dma_header(skb, qos); memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); ieee80211_rx_irqsafe(hw, skb); @@ -1586,11 +1443,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) else qos = 0; - if (priv->ap_fw) - mwl8k_encapsulate_tx_frame(skb); - else - mwl8k_add_dma_header(skb, 0); - + mwl8k_add_dma_header(skb); wh = &((struct mwl8k_dma_data *)skb->data)->wh; tx_info = IEEE80211_SKB_CB(skb); @@ -3245,274 +3098,6 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw, return rc; } -/* - * CMD_UPDATE_ENCRYPTION. - */ - -#define MAX_ENCR_KEY_LENGTH 16 -#define MIC_KEY_LENGTH 8 - -struct mwl8k_cmd_update_encryption { - struct mwl8k_cmd_pkt header; - - __le32 action; - __le32 reserved; - __u8 mac_addr[6]; - __u8 encr_type; - -} __attribute__((packed)); - -struct mwl8k_cmd_set_key { - struct mwl8k_cmd_pkt header; - - __le32 action; - __le32 reserved; - __le16 length; - __le16 key_type_id; - __le32 key_info; - __le32 key_id; - __le16 key_len; - __u8 key_material[MAX_ENCR_KEY_LENGTH]; - __u8 tkip_tx_mic_key[MIC_KEY_LENGTH]; - __u8 tkip_rx_mic_key[MIC_KEY_LENGTH]; - __le16 tkip_rsc_low; - __le32 tkip_rsc_high; - __le16 tkip_tsc_low; - __le32 tkip_tsc_high; - __u8 mac_addr[6]; -} __attribute__((packed)); - -enum { - MWL8K_ENCR_ENABLE, - MWL8K_ENCR_SET_KEY, - MWL8K_ENCR_REMOVE_KEY, - MWL8K_ENCR_SET_GROUP_KEY, -}; - -#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0 -#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1 -#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4 -#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7 -#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8 - -enum { - MWL8K_ALG_WEP, - MWL8K_ALG_TKIP, - MWL8K_ALG_CCMP, -}; - -#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004 -#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008 -#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040 -#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000 -#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000 - -static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - u8 *addr, - u8 encr_type) -{ - struct mwl8k_cmd_update_encryption *cmd; - int rc; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - - cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION); - cmd->header.length = cpu_to_le16(sizeof(*cmd)); - cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE); - memcpy(cmd->mac_addr, addr, ETH_ALEN); - cmd->encr_type = encr_type; - - rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); - kfree(cmd); - - return rc; -} - -static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd, - u8 *addr, - struct ieee80211_key_conf *key) -{ - cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION); - cmd->header.length = 
cpu_to_le16(sizeof(*cmd)); - cmd->length = cpu_to_le16(sizeof(*cmd) - - offsetof(struct mwl8k_cmd_set_key, length)); - cmd->key_id = cpu_to_le32(key->keyidx); - cmd->key_len = cpu_to_le16(key->keylen); - memcpy(cmd->mac_addr, addr, ETH_ALEN); - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP); - if (key->keyidx == 0) - cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY); - - break; - case WLAN_CIPHER_SUITE_TKIP: - cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP); - cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) - ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE) - : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY); - cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID - | MWL8K_KEY_FLAG_TSC_VALID); - break; - case WLAN_CIPHER_SUITE_CCMP: - cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP); - cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) - ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE) - : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY); - break; - default: - return -ENOTSUPP; - } - - return 0; -} - -static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - u8 *addr, - struct ieee80211_key_conf *key) -{ - struct mwl8k_cmd_set_key *cmd; - int rc; - int keymlen; - u32 action; - u8 idx; - struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - - rc = mwl8k_encryption_set_cmd_info(cmd, addr, key); - if (rc < 0) - goto done; - - idx = key->keyidx; - - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) - action = MWL8K_ENCR_SET_KEY; - else - action = MWL8K_ENCR_SET_GROUP_KEY; - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - if (!mwl8k_vif->wep_key_conf[idx].enabled) { - memcpy(mwl8k_vif->wep_key_conf[idx].key, key, - sizeof(*key) + key->keylen); - mwl8k_vif->wep_key_conf[idx].enabled = 1; - } - - keymlen = 0; - action = MWL8K_ENCR_SET_KEY; - break; - case WLAN_CIPHER_SUITE_TKIP: - keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH; - break; - case WLAN_CIPHER_SUITE_CCMP: - keymlen = key->keylen; - break; - default: - rc = -ENOTSUPP; - goto done; - } - - memcpy(cmd->key_material, key->key, keymlen); - cmd->action = cpu_to_le32(action); - - rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); -done: - kfree(cmd); - - return rc; -} - -static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - u8 *addr, - struct ieee80211_key_conf *key) -{ - struct mwl8k_cmd_set_key *cmd; - int rc; - struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - return -ENOMEM; - - rc = mwl8k_encryption_set_cmd_info(cmd, addr, key); - if (rc < 0) - goto done; - - if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || - WLAN_CIPHER_SUITE_WEP104) - mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0; - - cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY); - - rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); -done: - kfree(cmd); - - return rc; -} - -static int mwl8k_set_key(struct ieee80211_hw *hw, - enum set_key_cmd cmd_param, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - int rc = 0; - u8 encr_type; - u8 *addr; - struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); - - if (vif->type == NL80211_IFTYPE_STATION) - return -EOPNOTSUPP; - - if (sta == NULL) - addr = hw->wiphy->perm_addr; - else - addr = sta->addr; - - if (cmd_param == SET_KEY) { - key->flags |= 
IEEE80211_KEY_FLAG_GENERATE_IV; - rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key); - if (rc) - goto out; - - if ((key->cipher == WLAN_CIPHER_SUITE_WEP40) - || (key->cipher == WLAN_CIPHER_SUITE_WEP104)) - encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP; - else - encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED; - - rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr, - encr_type); - if (rc) - goto out; - - mwl8k_vif->is_hw_crypto_enabled = true; - - } else { - rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key); - - if (rc) - goto out; - - mwl8k_vif->is_hw_crypto_enabled = false; - - } -out: - return rc; -} - /* * CMD_UPDATE_STADB. */ @@ -3884,8 +3469,6 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw, mwl8k_vif->vif = vif; mwl8k_vif->macid = macid; mwl8k_vif->seqno = 0; - memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN); - mwl8k_vif->is_hw_crypto_enabled = false; /* Set the mac address. */ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr); @@ -4283,27 +3866,18 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw, { struct mwl8k_priv *priv = hw->priv; int ret; - int i; - struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); - struct ieee80211_key_conf *key; if (!priv->ap_fw) { ret = mwl8k_cmd_update_stadb_add(hw, vif, sta); if (ret >= 0) { MWL8K_STA(sta)->peer_id = ret; - ret = 0; + return 0; } - } else { - ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta); + return ret; } - for (i = 0; i < NUM_WEP_KEYS; i++) { - key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key); - if (mwl8k_vif->wep_key_conf[i].enabled) - mwl8k_set_key(hw, SET_KEY, vif, sta, key); - } - return ret; + return mwl8k_cmd_set_new_stn_add(hw, vif, sta); } static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, @@ -4358,8 +3932,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx, static int mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u16 *ssn) { switch (action) { case IEEE80211_AMPDU_RX_START: @@ -4382,7 +3955,6 @@ static const struct ieee80211_ops mwl8k_ops = { .bss_info_changed = mwl8k_bss_info_changed, .prepare_multicast = mwl8k_prepare_multicast, .configure_filter = mwl8k_configure_filter, - .set_key = mwl8k_set_key, .set_rts_threshold = mwl8k_set_rts_threshold, .sta_add = mwl8k_sta_add, .sta_remove = mwl8k_sta_remove, diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800.h b/trunk/drivers/net/wireless/rt2x00/rt2800.h index c7e615cebac1..4c55e8525cad 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800.h +++ b/trunk/drivers/net/wireless/rt2x00/rt2800.h @@ -1804,12 +1804,6 @@ struct mac_iveiv_entry { */ #define RFCSR30_RF_CALIBRATION FIELD8(0x80) -/* - * RFCSR 31: - */ -#define RFCSR31_RX_AGC_FC FIELD8(0x1f) -#define RFCSR31_RX_H20M FIELD8(0x20) - /* * RF registers */ diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c index f8ba01cbc6dd..54917a281398 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2436,10 +2436,6 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40); rt2800_bbp_write(rt2x00dev, 4, bbp); - rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40); - rt2800_rfcsr_write(rt2x00dev, 31, rfcsr); - rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1); 
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); @@ -2514,7 +2510,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 4, 0x40); rt2800_rfcsr_write(rt2x00dev, 5, 0x03); rt2800_rfcsr_write(rt2x00dev, 6, 0x02); - rt2800_rfcsr_write(rt2x00dev, 7, 0x60); + rt2800_rfcsr_write(rt2x00dev, 7, 0x70); rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); rt2800_rfcsr_write(rt2x00dev, 10, 0x41); rt2800_rfcsr_write(rt2x00dev, 11, 0x21); @@ -2606,12 +2602,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, LDO_CFG0, reg); } else if (rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090)) { - rt2800_rfcsr_write(rt2x00dev, 31, 0x14); - rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1); rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); + rt2800_rfcsr_write(rt2x00dev, 31, 0x14); + rt2800_register_read(rt2x00dev, LDO_CFG0, ®); rt2x00_set_field32(®, LDO_CFG0_BGSEL, 1); if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || @@ -2623,10 +2619,6 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, LDO_CFG0_LDO_CORE_VLEVEL, 0); } rt2800_register_write(rt2x00dev, LDO_CFG0, reg); - - rt2800_register_read(rt2x00dev, GPIO_SWITCH, ®); - rt2x00_set_field32(®, GPIO_SWITCH_5, 0); - rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg); } else if (rt2x00_rt(rt2x00dev, RT3390)) { rt2800_register_read(rt2x00dev, GPIO_SWITCH, ®); rt2x00_set_field32(®, GPIO_SWITCH_5, 0); @@ -2678,11 +2670,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); - if (rt2x00_rt(rt2x00dev, RT3070) || - rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || + if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { - if (!test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); } rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); @@ -2695,7 +2686,6 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt(rt2x00dev, RT3090)) { rt2800_bbp_read(rt2x00dev, 138, &bbp); - /* Turn off unused DAC1 and ADC1 to reduce power consumption */ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); @@ -2729,9 +2719,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); } - if (rt2x00_rt(rt2x00dev, RT3070)) { + if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) { rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr); - if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) + if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || + rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E)) rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3); else rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0); @@ -2819,7 +2810,10 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); /* Wait for DMA, ignore error */ @@ 
-2829,6 +2823,9 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 0); rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 0); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); + rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); } EXPORT_SYMBOL_GPL(rt2800_disable_radio); @@ -3533,8 +3530,7 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf); int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u16 *ssn) { int ret = 0; diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.h b/trunk/drivers/net/wireless/rt2x00/rt2800lib.h index 3efafb78ff77..e3c995a9dec4 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.h +++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.h @@ -198,8 +198,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, u64 rt2800_get_tsf(struct ieee80211_hw *hw); int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size); + struct ieee80211_sta *sta, u16 tid, u16 *ssn); int rt2800_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c index bfc2fc5c1c22..aa97971a38af 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c @@ -475,23 +475,39 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) { - if (rt2x00_is_soc(rt2x00dev)) { - rt2800_disable_radio(rt2x00dev); - rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); - rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); - } + u32 reg; + + rt2800_disable_radio(rt2x00dev); + + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); + + rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); + rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); + + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); } static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { + /* + * Always put the device to sleep (even when we intend to wakeup!) + * if the device is booting and wasn't asleep it will return + * failure when attempting to wakeup. 
+ */ + rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2); + if (state == STATE_AWAKE) { - rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02); + rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); - } else if (state == STATE_SLEEP) { - rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); - rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); - rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01); } return 0; diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c b/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c index 31b7db05abd9..9597a03242cc 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -649,10 +649,7 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry, const int channel, const int tx_power, const int value) { - /* XXX: this assumption about the band is wrong for 802.11j */ - entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - entry->center_freq = ieee80211_channel_to_frequency(channel, - entry->band); + entry->center_freq = ieee80211_channel_to_frequency(channel); entry->hw_value = value; entry->max_power = tx_power; entry->max_antenna_gain = 0xff; diff --git a/trunk/drivers/net/wireless/rt2x00/rt73usb.c b/trunk/drivers/net/wireless/rt2x00/rt73usb.c index 029be3c6c030..0b4e8590cbb7 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt73usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt73usb.c @@ -2446,7 +2446,6 @@ static struct usb_device_id rt73usb_device_table[] = { { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, - { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) }, /* Qcom */ { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, diff --git a/trunk/drivers/net/wireless/rtlwifi/core.c b/trunk/drivers/net/wireless/rtlwifi/core.c index 25d2d667ffba..d6a924a05654 100644 --- a/trunk/drivers/net/wireless/rtlwifi/core.c +++ b/trunk/drivers/net/wireless/rtlwifi/core.c @@ -748,8 +748,7 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw, static int rtl_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u16 * ssn) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/trunk/drivers/net/wireless/rtlwifi/pci.c b/trunk/drivers/net/wireless/rtlwifi/pci.c index 1758d4463247..0fa36aa6701a 100644 --- a/trunk/drivers/net/wireless/rtlwifi/pci.c +++ b/trunk/drivers/net/wireless/rtlwifi/pci.c @@ -619,13 +619,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) struct sk_buff *uskb = NULL; u8 *pdata; uskb = dev_alloc_skb(skb->len + 128); - if (!uskb) { - RT_TRACE(rtlpriv, - (COMP_INTR | COMP_RECV), - DBG_EMERG, - ("can't alloc rx skb\n")); - goto done; - } memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status)); @@ -648,7 +641,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) new_skb = dev_alloc_skb(rtlpci->rxbuffersize); if (unlikely(!new_skb)) { RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), - DBG_EMERG, + DBG_DMESG, ("can't alloc skb for rx\n")); goto done; } @@ -1073,9 +1066,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) struct sk_buff *skb = 
dev_alloc_skb(rtlpci->rxbuffersize); u32 bufferaddress; + entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; if (!skb) return 0; - entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; /*skb->dev = dev; */ diff --git a/trunk/drivers/net/wireless/wl1251/rx.c b/trunk/drivers/net/wireless/wl1251/rx.c index 86eef456d7b2..efa53607d5c9 100644 --- a/trunk/drivers/net/wireless/wl1251/rx.c +++ b/trunk/drivers/net/wireless/wl1251/rx.c @@ -78,8 +78,7 @@ static void wl1251_rx_status(struct wl1251 *wl, */ wl->noise = desc->rssi - desc->snr / 2; - status->freq = ieee80211_channel_to_frequency(desc->channel, - status->band); + status->freq = ieee80211_channel_to_frequency(desc->channel); status->flag |= RX_FLAG_TSFT; diff --git a/trunk/drivers/net/wireless/wl12xx/rx.c b/trunk/drivers/net/wireless/wl12xx/rx.c index ec8d843d41cf..682304c30b81 100644 --- a/trunk/drivers/net/wireless/wl12xx/rx.c +++ b/trunk/drivers/net/wireless/wl12xx/rx.c @@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl, */ wl->noise = desc->rssi - (desc->snr >> 1); - status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band); + status->freq = ieee80211_channel_to_frequency(desc->channel); if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; diff --git a/trunk/drivers/net/xen-netfront.c b/trunk/drivers/net/xen-netfront.c index da1f12120346..546de5749824 100644 --- a/trunk/drivers/net/xen-netfront.c +++ b/trunk/drivers/net/xen-netfront.c @@ -120,9 +120,6 @@ struct netfront_info { unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; - - /* Statistics */ - int rx_gso_checksum_fixup; }; struct netfront_rx_info { @@ -773,29 +770,11 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np, return cons; } -static int checksum_setup(struct net_device *dev, struct sk_buff *skb) +static int skb_checksum_setup(struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; - int recalculate_partial_csum = 0; - - /* - * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy - * peers can fail to set NETRXF_csum_blank when sending a GSO - * frame. In this case force the SKB to CHECKSUM_PARTIAL and - * recalculate the partial checksum. - */ - if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { - struct netfront_info *np = netdev_priv(dev); - np->rx_gso_checksum_fixup++; - skb->ip_summed = CHECKSUM_PARTIAL; - recalculate_partial_csum = 1; - } - - /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; if (skb->protocol != htons(ETH_P_IP)) goto out; @@ -809,23 +788,9 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb) switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); - - if (recalculate_partial_csum) { - struct tcphdr *tcph = (struct tcphdr *)th; - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - iph->ihl*4, - IPPROTO_TCP, 0); - } break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); - - if (recalculate_partial_csum) { - struct udphdr *udph = (struct udphdr *)th; - udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - iph->ihl*4, - IPPROTO_UDP, 0); - } break; default: if (net_ratelimit()) @@ -864,11 +829,13 @@ static int handle_incoming_queue(struct net_device *dev, /* Ethernet work: Delayed to here as it peeks the header. 
*/ skb->protocol = eth_type_trans(skb, dev); - if (checksum_setup(dev, skb)) { - kfree_skb(skb); - packets_dropped++; - dev->stats.rx_errors++; - continue; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_setup(skb)) { + kfree_skb(skb); + packets_dropped++; + dev->stats.rx_errors++; + continue; + } } dev->stats.rx_packets++; @@ -1665,59 +1632,12 @@ static void netback_changed(struct xenbus_device *dev, } } -static const struct xennet_stat { - char name[ETH_GSTRING_LEN]; - u16 offset; -} xennet_stats[] = { - { - "rx_gso_checksum_fixup", - offsetof(struct netfront_info, rx_gso_checksum_fixup) - }, -}; - -static int xennet_get_sset_count(struct net_device *dev, int string_set) -{ - switch (string_set) { - case ETH_SS_STATS: - return ARRAY_SIZE(xennet_stats); - default: - return -EINVAL; - } -} - -static void xennet_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 * data) -{ - void *np = netdev_priv(dev); - int i; - - for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) - data[i] = *(int *)(np + xennet_stats[i].offset); -} - -static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) -{ - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) - memcpy(data + i * ETH_GSTRING_LEN, - xennet_stats[i].name, ETH_GSTRING_LEN); - break; - } -} - static const struct ethtool_ops xennet_ethtool_ops = { .set_tx_csum = ethtool_op_set_tx_csum, .set_sg = xennet_set_sg, .set_tso = xennet_set_tso, .get_link = ethtool_op_get_link, - - .get_sset_count = xennet_get_sset_count, - .get_ethtool_stats = xennet_get_ethtool_stats, - .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS diff --git a/trunk/drivers/rapidio/rio-scan.c b/trunk/drivers/rapidio/rio-scan.c index a50391b6ba2a..467e82bd0929 100644 --- a/trunk/drivers/rapidio/rio-scan.c +++ b/trunk/drivers/rapidio/rio-scan.c @@ -943,8 +943,6 @@ static int rio_enum_complete(struct rio_mport *port) * @port: Master port to send transactions * @destid: Current destination ID in network * @hopcount: Number of hops into the network - * @prev: previous rio_dev - * @prev_port: previous port number * * Recursively discovers a RIO network. Transactions are sent via the * master port passed in @port. diff --git a/trunk/drivers/rtc/Kconfig b/trunk/drivers/rtc/Kconfig index cdd97192dc69..4941cade319f 100644 --- a/trunk/drivers/rtc/Kconfig +++ b/trunk/drivers/rtc/Kconfig @@ -97,6 +97,18 @@ config RTC_INTF_DEV If unsure, say Y. +config RTC_INTF_DEV_UIE_EMUL + bool "RTC UIE emulation on dev interface" + depends on RTC_INTF_DEV + help + Provides an emulation for RTC_UIE if the underlying rtc chip + driver does not expose RTC_UIE ioctls. Those requests generate + once-per-second update interrupts, used for synchronization. + + The emulation code will read the time from the hardware + clock several times per second, please enable this option + only if you know that you really need it. 
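For context on what the RTC_INTF_DEV_UIE_EMUL option above provides to applications: update interrupts are consumed from userspace through the rtc character device, and the emulation only changes how they are generated, not how they are read. The standalone sketch below shows that usage pattern; it assumes a /dev/rtc0 node and the classic rtctest-style interface, and is purely illustrative rather than part of this patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int i;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Enable update interrupts; with RTC_INTF_DEV_UIE_EMUL this can
	 * succeed even when the chip driver has no native UIE support. */
	if (ioctl(fd, RTC_UIE_ON, 0) < 0)
		return 1;

	for (i = 0; i < 5; i++) {
		/* Each read blocks until the next once-per-second update.
		 * The low byte is the interrupt type, the rest is a count. */
		if (read(fd, &data, sizeof(data)) < 0)
			break;
		printf("update irq, %lu since last read\n", data >> 8);
	}

	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}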
+ config RTC_DRV_TEST tristate "Test driver/device" help diff --git a/trunk/drivers/rtc/interface.c b/trunk/drivers/rtc/interface.c index 925006d33109..90384b9f6b2c 100644 --- a/trunk/drivers/rtc/interface.c +++ b/trunk/drivers/rtc/interface.c @@ -16,9 +16,6 @@ #include #include -static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer); -static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer); - static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; @@ -123,18 +120,12 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; - if (rtc->ops == NULL) - err = -ENODEV; - else if (!rtc->ops->read_alarm) - err = -EINVAL; - else { - memset(alarm, 0, sizeof(struct rtc_wkalrm)); - alarm->enabled = rtc->aie_timer.enabled; + alarm->enabled = rtc->aie_timer.enabled; + if (alarm->enabled) alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires); - } mutex_unlock(&rtc->ops_lock); - return err; + return 0; } EXPORT_SYMBOL_GPL(rtc_read_alarm); @@ -184,14 +175,16 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) return err; if (rtc->aie_timer.enabled) { rtc_timer_remove(rtc, &rtc->aie_timer); + rtc->aie_timer.enabled = 0; } rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); rtc->aie_timer.period = ktime_set(0, 0); if (alarm->enabled) { - err = rtc_timer_enqueue(rtc, &rtc->aie_timer); + rtc->aie_timer.enabled = 1; + rtc_timer_enqueue(rtc, &rtc->aie_timer); } mutex_unlock(&rtc->ops_lock); - return err; + return 0; } EXPORT_SYMBOL_GPL(rtc_set_alarm); @@ -202,15 +195,15 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) return err; if (rtc->aie_timer.enabled != enabled) { - if (enabled) - err = rtc_timer_enqueue(rtc, &rtc->aie_timer); - else + if (enabled) { + rtc->aie_timer.enabled = 1; + rtc_timer_enqueue(rtc, &rtc->aie_timer); + } else { rtc_timer_remove(rtc, &rtc->aie_timer); + rtc->aie_timer.enabled = 0; + } } - if (err) - return err; - if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->alarm_irq_enable) @@ -242,9 +235,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) now = rtc_tm_to_ktime(tm); rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); rtc->uie_rtctimer.period = ktime_set(1, 0); - err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); - } else + rtc->uie_rtctimer.enabled = 1; + rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); + } else { rtc_timer_remove(rtc, &rtc->uie_rtctimer); + rtc->uie_rtctimer.enabled = 0; + } out: mutex_unlock(&rtc->ops_lock); @@ -492,13 +488,10 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq); * Enqueues a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * - * Sets the enabled bit on the added timer. 
- * * Must hold ops_lock for proper serialization of timerqueue */ -static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) +void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) { - timer->enabled = 1; timerqueue_add(&rtc->timerqueue, &timer->node); if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { struct rtc_wkalrm alarm; @@ -508,13 +501,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) schedule_work(&rtc->irqwork); - else if (err) { - timerqueue_del(&rtc->timerqueue, &timer->node); - timer->enabled = 0; - return err; - } } - return 0; } /** @@ -525,15 +512,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) * Removes a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * - * Clears the enabled bit on the removed timer. - * * Must hold ops_lock for proper serialization of timerqueue */ -static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) +void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) { struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); timerqueue_del(&rtc->timerqueue, &timer->node); - timer->enabled = 0; + if (next == &timer->node) { struct rtc_wkalrm alarm; int err; @@ -641,7 +626,8 @@ int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, timer->node.expires = expires; timer->period = period; - ret = rtc_timer_enqueue(rtc, timer); + timer->enabled = 1; + rtc_timer_enqueue(rtc, timer); mutex_unlock(&rtc->ops_lock); return ret; @@ -659,6 +645,7 @@ int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer) mutex_lock(&rtc->ops_lock); if (timer->enabled) rtc_timer_remove(rtc, timer); + timer->enabled = 0; mutex_unlock(&rtc->ops_lock); return ret; } diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c index 2a6bd9a4ae97..9f493ee4dcba 100644 --- a/trunk/fs/dcache.c +++ b/trunk/fs/dcache.c @@ -176,7 +176,6 @@ static void d_free(struct dentry *dentry) /** * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups - * @dentry: the target dentry * After this call, in-progress rcu-walk path lookup will fail. This * should be called after unhashing, and after changing d_inode (if * the dentry has not already been unhashed). @@ -282,7 +281,6 @@ static void dentry_lru_move_tail(struct dentry *dentry) /** * d_kill - kill dentry and return parent * @dentry: dentry to kill - * @parent: parent dentry * * The dentry must already be unhashed and removed from the LRU. * @@ -1975,7 +1973,7 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) /** * d_validate - verify dentry provided from insecure source (deprecated) * @dentry: The dentry alleged to be valid child of @dparent - * @dparent: The parent dentry (known to be valid) + * @parent: The parent dentry (known to be valid) * * An insecure source has sent us a dentry, here we verify it and dget() it. * This is used by ncpfs in its readdir implementation. diff --git a/trunk/include/asm-generic/vmlinux.lds.h b/trunk/include/asm-generic/vmlinux.lds.h index 6ebb81030d2d..68649336c4ad 100644 --- a/trunk/include/asm-generic/vmlinux.lds.h +++ b/trunk/include/asm-generic/vmlinux.lds.h @@ -364,13 +364,6 @@ VMLINUX_SYMBOL(__start___param) = .; \ *(__param) \ VMLINUX_SYMBOL(__stop___param) = .; \ - } \ - \ - /* Built-in module versions. 
*/ \ - __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ - VMLINUX_SYMBOL(__start___modver) = .; \ - *(__modver) \ - VMLINUX_SYMBOL(__stop___modver) = .; \ . = ALIGN((align)); \ VMLINUX_SYMBOL(__end_rodata) = .; \ } \ diff --git a/trunk/include/linux/Kbuild b/trunk/include/linux/Kbuild index b0ada6f37dd6..2296d8b1931f 100644 --- a/trunk/include/linux/Kbuild +++ b/trunk/include/linux/Kbuild @@ -1,6 +1,5 @@ header-y += byteorder/ header-y += can/ -header-y += caif/ header-y += dvb/ header-y += hdlc/ header-y += isdn/ diff --git a/trunk/include/linux/audit.h b/trunk/include/linux/audit.h index 9d339eb27881..359df0487690 100644 --- a/trunk/include/linux/audit.h +++ b/trunk/include/linux/audit.h @@ -103,8 +103,6 @@ #define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */ #define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */ #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */ -#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */ -#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ diff --git a/trunk/include/linux/caif/Kbuild b/trunk/include/linux/caif/Kbuild deleted file mode 100644 index a9cf250689dc..000000000000 --- a/trunk/include/linux/caif/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -header-y += caif_socket.h -header-y += if_caif.h diff --git a/trunk/include/linux/cpu_rmap.h b/trunk/include/linux/cpu_rmap.h deleted file mode 100644 index 473771a528c0..000000000000 --- a/trunk/include/linux/cpu_rmap.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * cpu_rmap.c: CPU affinity reverse-map support - * Copyright 2011 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. - */ - -#include -#include -#include - -/** - * struct cpu_rmap - CPU affinity reverse-map - * @size: Number of objects to be reverse-mapped - * @used: Number of objects added - * @obj: Pointer to array of object pointers - * @near: For each CPU, the index and distance to the nearest object, - * based on affinity masks - */ -struct cpu_rmap { - u16 size, used; - void **obj; - struct { - u16 index; - u16 dist; - } near[0]; -}; -#define CPU_RMAP_DIST_INF 0xffff - -extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); - -/** - * free_cpu_rmap - free CPU affinity reverse-map - * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL - */ -static inline void free_cpu_rmap(struct cpu_rmap *rmap) -{ - kfree(rmap); -} - -extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); -extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, - const struct cpumask *affinity); - -static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu) -{ - return rmap->near[cpu].index; -} - -static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu) -{ - return rmap->obj[rmap->near[cpu].index]; -} - -#ifdef CONFIG_GENERIC_HARDIRQS - -/** - * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs - * @size: Number of objects to be mapped - * - * Must be called in process context. 
- */ -static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size) -{ - return alloc_cpu_rmap(size, GFP_KERNEL); -} -extern void free_irq_cpu_rmap(struct cpu_rmap *rmap); - -extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq); - -#endif diff --git a/trunk/include/linux/dccp.h b/trunk/include/linux/dccp.h index d638e85dc501..010e2d87ed75 100644 --- a/trunk/include/linux/dccp.h +++ b/trunk/include/linux/dccp.h @@ -279,6 +279,8 @@ enum dccp_state { DCCP_MAX_STATES }; +#define DCCP_STATE_MASK 0x1f + enum { DCCPF_OPEN = TCPF_ESTABLISHED, DCCPF_REQUESTING = TCPF_SYN_SENT, diff --git a/trunk/include/linux/gfp.h b/trunk/include/linux/gfp.h index 0b84c61607e8..a3b148a91874 100644 --- a/trunk/include/linux/gfp.h +++ b/trunk/include/linux/gfp.h @@ -249,7 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags) ((1 << ZONES_SHIFT) - 1); if (__builtin_constant_p(bit)) - BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1); + MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1); else { #ifdef CONFIG_DEBUG_VM BUG_ON((GFP_ZONE_BAD >> bit) & 1); diff --git a/trunk/include/linux/if_link.h b/trunk/include/linux/if_link.h index f4a2e6b1b864..6485d2a89bec 100644 --- a/trunk/include/linux/if_link.h +++ b/trunk/include/linux/if_link.h @@ -135,7 +135,6 @@ enum { IFLA_VF_PORTS, IFLA_PORT_SELF, IFLA_AF_SPEC, - IFLA_GROUP, /* Group the device belongs to */ __IFLA_MAX }; diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index 63c5ad78e37c..55e0d4253e49 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -14,8 +14,6 @@ #include #include #include -#include -#include #include #include @@ -242,35 +240,6 @@ extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); - -/** - * struct irq_affinity_notify - context for notification of IRQ affinity changes - * @irq: Interrupt to which notification applies - * @kref: Reference count, for internal use - * @work: Work item, for internal use - * @notify: Function to be called on change. This will be - * called in process context. - * @release: Function to be called on release. This will be - * called in process context. Once registered, the - * structure must only be freed when this function is - * called or later. 
- */ -struct irq_affinity_notify { - unsigned int irq; - struct kref kref; - struct work_struct work; - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); - void (*release)(struct kref *ref); -}; - -extern int -irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); - -static inline void irq_run_affinity_notifiers(void) -{ - flush_scheduled_work(); -} - #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) @@ -286,7 +255,7 @@ static inline int irq_can_set_affinity(unsigned int irq) static inline int irq_select_affinity(unsigned int irq) { return 0; } static inline int irq_set_affinity_hint(unsigned int irq, - const struct cpumask *m) + const struct cpumask *m) { return -EINVAL; } diff --git a/trunk/include/linux/ip_vs.h b/trunk/include/linux/ip_vs.h index 4deb3834d62c..5f43a3b2e3ad 100644 --- a/trunk/include/linux/ip_vs.h +++ b/trunk/include/linux/ip_vs.h @@ -89,14 +89,6 @@ #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ #define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ -#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \ - IP_VS_CONN_F_NOOUTPUT | \ - IP_VS_CONN_F_INACTIVE | \ - IP_VS_CONN_F_SEQ_MASK | \ - IP_VS_CONN_F_NO_CPORT | \ - IP_VS_CONN_F_TEMPLATE \ - ) - /* Flags that are not sent to backup server start from bit 16 */ #define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */ diff --git a/trunk/include/linux/irqdesc.h b/trunk/include/linux/irqdesc.h index bfef56dadddb..c1a95b7b58de 100644 --- a/trunk/include/linux/irqdesc.h +++ b/trunk/include/linux/irqdesc.h @@ -8,7 +8,6 @@ * For now it's included from */ -struct irq_affinity_notify; struct proc_dir_entry; struct timer_rand_state; /** @@ -25,7 +24,6 @@ struct timer_rand_state; * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP - * @affinity_notify: context for notification of affinity changes * @pending_mask: pending rebalanced interrupts * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers @@ -72,7 +70,6 @@ struct irq_desc { raw_spinlock_t lock; #ifdef CONFIG_SMP const struct cpumask *affinity_hint; - struct irq_affinity_notify *affinity_notify; #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_var_t pending_mask; #endif diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index e2f4d6af2125..d07d8057e440 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -575,6 +575,12 @@ struct sysinfo { char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */ }; +/* Force a compilation error if condition is true */ +#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition)) + +/* Force a compilation error if condition is constant and true */ +#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)])) + /* Force a compilation error if a constant expression is not a power of 2 */ #define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) @@ -586,32 +592,6 @@ struct sysinfo { #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) -/** - * BUILD_BUG_ON - break compile if a condition is true. - * @cond: the condition which the compiler should know is false. 
- * - * If you have some code which relies on certain constants being equal, or - * other compile-time-evaluated condition, you should use BUILD_BUG_ON to - * detect if someone changes it. - * - * The implementation uses gcc's reluctance to create a negative array, but - * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments - * to inline functions). So as a fallback we use the optimizer; if it can't - * prove the condition is false, it will cause a link error on the undefined - * "__build_bug_on_failed". This error message can be harder to track down - * though, hence the two different methods. - */ -#ifndef __OPTIMIZE__ -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#else -extern int __build_bug_on_failed; -#define BUILD_BUG_ON(condition) \ - do { \ - ((void)sizeof(char[1 - 2*!!(condition)])); \ - if (condition) __build_bug_on_failed = 1; \ - } while(0) -#endif - /* Trap pasters of __FUNCTION__ at compile-time */ #define __FUNCTION__ (__func__) diff --git a/trunk/include/linux/kmemcheck.h b/trunk/include/linux/kmemcheck.h index 39f8453239f7..08d7dc4ddf40 100644 --- a/trunk/include/linux/kmemcheck.h +++ b/trunk/include/linux/kmemcheck.h @@ -76,7 +76,7 @@ bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); \ _n = (long) &((ptr)->name##_end) \ - (long) &((ptr)->name##_begin); \ - BUILD_BUG_ON(_n < 0); \ + MAYBE_BUILD_BUG_ON(_n < 0); \ \ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ } while (0) diff --git a/trunk/include/linux/module.h b/trunk/include/linux/module.h index e7c6385c6683..8b17fd8c790d 100644 --- a/trunk/include/linux/module.h +++ b/trunk/include/linux/module.h @@ -58,12 +58,6 @@ struct module_attribute { void (*free)(struct module *); }; -struct module_version_attribute { - struct module_attribute mattr; - const char *module_name; - const char *version; -}; - struct module_kobject { struct kobject kobj; @@ -167,28 +161,7 @@ extern struct module __this_module; Using this automatically adds a checksum of the .c files and the local headers in "srcversion". */ - -#if defined(MODULE) || !defined(CONFIG_SYSFS) #define MODULE_VERSION(_version) MODULE_INFO(version, _version) -#else -#define MODULE_VERSION(_version) \ - extern ssize_t __modver_version_show(struct module_attribute *, \ - struct module *, char *); \ - static struct module_version_attribute __modver_version_attr \ - __used \ - __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \ - = { \ - .mattr = { \ - .attr = { \ - .name = "version", \ - .mode = S_IRUGO, \ - }, \ - .show = __modver_version_show, \ - }, \ - .module_name = KBUILD_MODNAME, \ - .version = _version, \ - } -#endif /* Optional firmware file (or files) needed by the module * format is simply firmware file name. Multiple firmware diff --git a/trunk/include/linux/moduleparam.h b/trunk/include/linux/moduleparam.h index 07b41951e3fa..112adf8bd47d 100644 --- a/trunk/include/linux/moduleparam.h +++ b/trunk/include/linux/moduleparam.h @@ -16,17 +16,15 @@ /* Chosen so that structs with an unsigned long line up. 
*/ #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) +#ifdef MODULE #define ___module_cat(a,b) __mod_ ## a ## b #define __module_cat(a,b) ___module_cat(a,b) -#ifdef MODULE #define __MODULE_INFO(tag, name, info) \ static const char __module_cat(name,__LINE__)[] \ __used __attribute__((section(".modinfo"), unused, aligned(1))) \ = __stringify(tag) "=" info #else /* !MODULE */ -/* This struct is here for syntactic coherency, it is not used */ -#define __MODULE_INFO(tag, name, info) \ - struct __module_cat(name,__LINE__) {} +#define __MODULE_INFO(tag, name, info) #endif #define __MODULE_PARM_TYPE(name, _type) \ __MODULE_INFO(parmtype, name##type, #name ":" _type) diff --git a/trunk/include/linux/mroute.h b/trunk/include/linux/mroute.h index b21d567692b2..0fa7a3a874c8 100644 --- a/trunk/include/linux/mroute.h +++ b/trunk/include/linux/mroute.h @@ -150,7 +150,6 @@ static inline int ip_mroute_opt(int opt) extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); -extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); extern int ip_mr_init(void); #else static inline diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h index c7d707452228..d971346b0340 100644 --- a/trunk/include/linux/netdevice.h +++ b/trunk/include/linux/netdevice.h @@ -75,9 +75,6 @@ struct wireless_dev; #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ #define NET_RX_DROP 1 /* packet dropped */ -/* Initial net device group. All devices belong to group 0 by default. */ -#define INIT_NETDEV_GROUP 0 - /* * Transmit return codes: transmit return codes originate from three different * namespaces: @@ -554,16 +551,14 @@ struct rps_map { #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) /* - * The rps_dev_flow structure contains the mapping of a flow to a CPU, the - * tail pointer for that CPU's input queue at the time of last enqueue, and - * a hardware filter index. + * The rps_dev_flow structure contains the mapping of a flow to a CPU and the + * tail pointer for that CPU's input queue at the time of last enqueue. */ struct rps_dev_flow { u16 cpu; - u16 filter; + u16 fill; unsigned int last_qtail; }; -#define RPS_NO_FILTER 0xffff /* * The rps_dev_flow_table structure contains a table of flow mappings. @@ -613,11 +608,6 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; -#ifdef CONFIG_RFS_ACCEL -extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, - u32 flow_id, u16 filter_id); -#endif - /* This structure contains an instance of an RX queue. */ struct netdev_rx_queue { struct rps_map __rcu *rps_map; @@ -653,14 +643,6 @@ struct xps_dev_maps { (nr_cpu_ids * sizeof(struct xps_map *))) #endif /* CONFIG_XPS */ -#define TC_MAX_QUEUE 16 -#define TC_BITMASK 15 -/* HW offloaded queuing disciplines txq count and offset maps */ -struct netdev_tc_txq { - u16 count; - u16 offset; -}; - /* * This structure defines the management hooks for network devices. 
* The following hooks can be defined; unless noted otherwise, they are @@ -771,18 +753,6 @@ struct netdev_tc_txq { * int (*ndo_set_vf_port)(struct net_device *dev, int vf, * struct nlattr *port[]); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) - * Called to setup 'tc' number of traffic classes in the net device. This - * is always called from the stack with the rtnl lock held and netif tx - * queues stopped. This allows the netdevice to perform queue management - * safely. - * - * RFS acceleration. - * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, - * u16 rxq_index, u32 flow_id); - * Set hardware filter for RFS. rxq_index is the target queue index; - * flow_id is a flow ID to be passed to rps_may_expire_flow() later. - * Return the filter ID on success, or a negative error code. */ #define HAVE_NET_DEVICE_OPS struct net_device_ops { @@ -841,7 +811,6 @@ struct net_device_ops { struct nlattr *port[]); int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - int (*ndo_setup_tc)(struct net_device *dev, u8 tc); #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); @@ -856,12 +825,6 @@ struct net_device_ops { int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); #endif -#ifdef CONFIG_RFS_ACCEL - int (*ndo_rx_flow_steer)(struct net_device *dev, - const struct sk_buff *skb, - u16 rxq_index, - u32 flow_id); -#endif }; /* @@ -914,11 +877,7 @@ struct net_device { struct list_head unreg_list; /* Net device features */ - u32 features; - - /* VLAN feature mask */ - u32 vlan_features; - + unsigned long features; #define NETIF_F_SG 1 /* Scatter/gather IO. */ #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */ #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ @@ -1080,13 +1039,6 @@ struct net_device { /* Number of RX queues currently active in device */ unsigned int real_num_rx_queues; - -#ifdef CONFIG_RFS_ACCEL - /* CPU reverse-mapping for RX completion interrupts, indexed - * by RX queue number. Assigned by driver. This must only be - * set if the ndo_rx_flow_steer operation is defined. 
*/ - struct cpu_rmap *rx_cpu_rmap; -#endif #endif rx_handler_func_t __rcu *rx_handler; @@ -1180,6 +1132,9 @@ struct net_device { /* rtnetlink link ops */ const struct rtnl_link_ops *rtnl_link_ops; + /* VLAN feature mask */ + unsigned long vlan_features; + /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; @@ -1188,9 +1143,6 @@ struct net_device { /* Data Center Bridging netlink ops */ const struct dcbnl_rtnl_ops *dcbnl_ops; #endif - u8 num_tc; - struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; - u8 prio_tc_map[TC_BITMASK + 1]; #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) /* max exchange id for FCoE LRO by ddp */ @@ -1201,65 +1153,11 @@ struct net_device { /* phy device may attach itself for hardware timestamping */ struct phy_device *phydev; - - /* group the device belongs to */ - int group; }; #define to_net_dev(d) container_of(d, struct net_device, dev) #define NETDEV_ALIGN 32 -static inline -int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) -{ - return dev->prio_tc_map[prio & TC_BITMASK]; -} - -static inline -int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) -{ - if (tc >= dev->num_tc) - return -EINVAL; - - dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; - return 0; -} - -static inline -void netdev_reset_tc(struct net_device *dev) -{ - dev->num_tc = 0; - memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); - memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); -} - -static inline -int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) -{ - if (tc >= dev->num_tc) - return -EINVAL; - - dev->tc_to_txq[tc].count = count; - dev->tc_to_txq[tc].offset = offset; - return 0; -} - -static inline -int netdev_set_num_tc(struct net_device *dev, u8 num_tc) -{ - if (num_tc > TC_MAX_QUEUE) - return -EINVAL; - - dev->num_tc = num_tc; - return 0; -} - -static inline -int netdev_get_num_tc(struct net_device *dev) -{ - return dev->num_tc; -} - static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, unsigned int index) @@ -1402,7 +1300,7 @@ struct packet_type { struct packet_type *, struct net_device *); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - u32 features); + int features); int (*gso_send_check)(struct sk_buff *skb); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); @@ -1447,7 +1345,7 @@ static inline struct net_device *next_net_device_rcu(struct net_device *dev) struct net *net; net = dev_net(dev); - lh = rcu_dereference(list_next_rcu(&dev->dev_list)); + lh = rcu_dereference(dev->dev_list.next); return lh == &net->dev_base_head ? NULL : net_device_entry(lh); } @@ -1457,13 +1355,6 @@ static inline struct net_device *first_net_device(struct net *net) net_device_entry(net->dev_base_head.next); } -static inline struct net_device *first_net_device_rcu(struct net *net) -{ - struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); - - return lh == &net->dev_base_head ? 
NULL : net_device_entry(lh); -} - extern int netdev_boot_setup_check(struct net_device *dev); extern unsigned long netdev_boot_base(const char *prefix, int unit); extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, @@ -1953,7 +1844,6 @@ extern int dev_set_alias(struct net_device *, const char *, size_t); extern int dev_change_net_namespace(struct net_device *, struct net *, const char *); extern int dev_set_mtu(struct net_device *, int); -extern void dev_set_group(struct net_device *, int); extern int dev_set_mac_address(struct net_device *, struct sockaddr *); extern int dev_hard_start_xmit(struct sk_buff *skb, @@ -2378,7 +2268,7 @@ extern int netdev_tstamp_prequeue; extern int weight_p; extern int netdev_set_master(struct net_device *dev, struct net_device *master); extern int skb_checksum_help(struct sk_buff *skb); -extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); #ifdef CONFIG_BUG extern void netdev_rx_csum_fault(struct net_device *dev); #else @@ -2405,21 +2295,22 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l extern void linkwatch_run_queue(void); -u32 netdev_increment_features(u32 all, u32 one, u32 mask); -u32 netdev_fix_features(struct net_device *dev, u32 features); +unsigned long netdev_increment_features(unsigned long all, unsigned long one, + unsigned long mask); +unsigned long netdev_fix_features(unsigned long features, const char *name); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); -u32 netif_skb_features(struct sk_buff *skb); +int netif_skb_features(struct sk_buff *skb); -static inline int net_gso_ok(u32 features, int gso_type) +static inline int net_gso_ok(int features, int gso_type) { int feature = gso_type << NETIF_F_GSO_SHIFT; return (features & feature) == feature; } -static inline int skb_gso_ok(struct sk_buff *skb, u32 features) +static inline int skb_gso_ok(struct sk_buff *skb, int features) { return net_gso_ok(features, skb_shinfo(skb)->gso_type) && (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); diff --git a/trunk/include/linux/netfilter.h b/trunk/include/linux/netfilter.h index eeec00abb664..1893837b3966 100644 --- a/trunk/include/linux/netfilter.h +++ b/trunk/include/linux/netfilter.h @@ -24,20 +24,16 @@ #define NF_MAX_VERDICT NF_STOP /* we overload the higher bits for encoding auxiliary data such as the queue - * number or errno values. Not nice, but better than additional function - * arguments. */ -#define NF_VERDICT_MASK 0x000000ff - -/* extra verdict flags have mask 0x0000ff00 */ -#define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000 + * number. Not nice, but better than additional function arguments. */ +#define NF_VERDICT_MASK 0x0000ffff +#define NF_VERDICT_BITS 16 -/* queue number (NF_QUEUE) or errno (NF_DROP) */ #define NF_VERDICT_QMASK 0xffff0000 #define NF_VERDICT_QBITS 16 -#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE) +#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE) -#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP) +#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP) /* only for userspace compatibility */ #ifndef __KERNEL__ @@ -45,9 +41,6 @@ <= 0x2000 is used for protocol-flags. 
*/ #define NFC_UNKNOWN 0x4000 #define NFC_ALTERED 0x8000 - -/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */ -#define NF_VERDICT_BITS 16 #endif enum nf_inet_hooks { @@ -79,10 +72,6 @@ union nf_inet_addr { #ifdef __KERNEL__ #ifdef CONFIG_NETFILTER -static inline int NF_DROP_GETERR(int verdict) -{ - return -(verdict >> NF_VERDICT_QBITS); -} static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *a2) @@ -278,7 +267,7 @@ struct nf_afinfo { int route_key_size; }; -extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; +extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO]; static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) { return rcu_dereference(nf_afinfo[family]); @@ -368,9 +357,9 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) #endif /*CONFIG_NETFILTER*/ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) -extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu; +extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *); extern void nf_ct_attach(struct sk_buff *, struct sk_buff *); -extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; +extern void (*nf_ct_destroy)(struct nf_conntrack *); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} #endif diff --git a/trunk/include/linux/netfilter/Kbuild b/trunk/include/linux/netfilter/Kbuild index 89c0d1e20d72..9d40effe7ca7 100644 --- a/trunk/include/linux/netfilter/Kbuild +++ b/trunk/include/linux/netfilter/Kbuild @@ -9,7 +9,6 @@ header-y += nfnetlink_conntrack.h header-y += nfnetlink_log.h header-y += nfnetlink_queue.h header-y += x_tables.h -header-y += xt_AUDIT.h header-y += xt_CHECKSUM.h header-y += xt_CLASSIFY.h header-y += xt_CONNMARK.h @@ -56,7 +55,6 @@ header-y += xt_rateest.h header-y += xt_realm.h header-y += xt_recent.h header-y += xt_sctp.h -header-y += xt_socket.h header-y += xt_state.h header-y += xt_statistic.h header-y += xt_string.h diff --git a/trunk/include/linux/netfilter/nf_conntrack_snmp.h b/trunk/include/linux/netfilter/nf_conntrack_snmp.h deleted file mode 100644 index 064bc63a5346..000000000000 --- a/trunk/include/linux/netfilter/nf_conntrack_snmp.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _NF_CONNTRACK_SNMP_H -#define _NF_CONNTRACK_SNMP_H - -extern int (*nf_nat_snmp_hook)(struct sk_buff *skb, - unsigned int protoff, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo); - -#endif /* _NF_CONNTRACK_SNMP_H */ diff --git a/trunk/include/linux/netfilter/nfnetlink_conntrack.h b/trunk/include/linux/netfilter/nfnetlink_conntrack.h index debf1aefd753..19711e3ffd42 100644 --- a/trunk/include/linux/netfilter/nfnetlink_conntrack.h +++ b/trunk/include/linux/netfilter/nfnetlink_conntrack.h @@ -42,7 +42,6 @@ enum ctattr_type { CTA_SECMARK, /* obsolete */ CTA_ZONE, CTA_SECCTX, - CTA_TIMESTAMP, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) @@ -128,14 +127,6 @@ enum ctattr_counters { }; #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) -enum ctattr_tstamp { - CTA_TIMESTAMP_UNSPEC, - CTA_TIMESTAMP_START, - CTA_TIMESTAMP_STOP, - __CTA_TIMESTAMP_MAX -}; -#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1) - enum ctattr_nat { CTA_NAT_UNSPEC, CTA_NAT_MINIP, diff --git a/trunk/include/linux/netfilter/x_tables.h b/trunk/include/linux/netfilter/x_tables.h index 37219525ff6f..6712e713b299 100644 --- a/trunk/include/linux/netfilter/x_tables.h +++ b/trunk/include/linux/netfilter/x_tables.h @@ -611,9 +611,8 @@ struct _compat_xt_align { 
extern void xt_compat_lock(u_int8_t af); extern void xt_compat_unlock(u_int8_t af); -extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta); +extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta); extern void xt_compat_flush_offsets(u_int8_t af); -extern void xt_compat_init_offsets(u_int8_t af, unsigned int number); extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset); extern int xt_compat_match_offset(const struct xt_match *match); diff --git a/trunk/include/linux/netfilter/xt_AUDIT.h b/trunk/include/linux/netfilter/xt_AUDIT.h deleted file mode 100644 index 38751d2ea52b..000000000000 --- a/trunk/include/linux/netfilter/xt_AUDIT.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Header file for iptables xt_AUDIT target - * - * (C) 2010-2011 Thomas Graf - * (C) 2010-2011 Red Hat, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _XT_AUDIT_TARGET_H -#define _XT_AUDIT_TARGET_H - -#include - -enum { - XT_AUDIT_TYPE_ACCEPT = 0, - XT_AUDIT_TYPE_DROP, - XT_AUDIT_TYPE_REJECT, - __XT_AUDIT_TYPE_MAX, -}; - -#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1) - -struct xt_audit_info { - __u8 type; /* XT_AUDIT_TYPE_* */ -}; - -#endif /* _XT_AUDIT_TARGET_H */ diff --git a/trunk/include/linux/netfilter/xt_CT.h b/trunk/include/linux/netfilter/xt_CT.h index b56e76811c04..1b564106891d 100644 --- a/trunk/include/linux/netfilter/xt_CT.h +++ b/trunk/include/linux/netfilter/xt_CT.h @@ -1,16 +1,14 @@ #ifndef _XT_CT_H #define _XT_CT_H -#include - #define XT_CT_NOTRACK 0x1 struct xt_ct_target_info { - __u16 flags; - __u16 zone; - __u32 ct_events; - __u32 exp_events; - char helper[16]; + u_int16_t flags; + u_int16_t zone; + u_int32_t ct_events; + u_int32_t exp_events; + char helper[16]; /* Used internally by the kernel */ struct nf_conn *ct __attribute__((aligned(8))); diff --git a/trunk/include/linux/netfilter/xt_NFQUEUE.h b/trunk/include/linux/netfilter/xt_NFQUEUE.h index 9eafdbbb401c..2584f4a777de 100644 --- a/trunk/include/linux/netfilter/xt_NFQUEUE.h +++ b/trunk/include/linux/netfilter/xt_NFQUEUE.h @@ -20,10 +20,4 @@ struct xt_NFQ_info_v1 { __u16 queues_total; }; -struct xt_NFQ_info_v2 { - __u16 queuenum; - __u16 queues_total; - __u16 bypass; -}; - #endif /* _XT_NFQ_TARGET_H */ diff --git a/trunk/include/linux/netfilter/xt_TCPOPTSTRIP.h b/trunk/include/linux/netfilter/xt_TCPOPTSTRIP.h index 7157318499c2..2db543214ff5 100644 --- a/trunk/include/linux/netfilter/xt_TCPOPTSTRIP.h +++ b/trunk/include/linux/netfilter/xt_TCPOPTSTRIP.h @@ -1,15 +1,13 @@ #ifndef _XT_TCPOPTSTRIP_H #define _XT_TCPOPTSTRIP_H -#include - #define tcpoptstrip_set_bit(bmap, idx) \ (bmap[(idx) >> 5] |= 1U << (idx & 31)) #define tcpoptstrip_test_bit(bmap, idx) \ (((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0) struct xt_tcpoptstrip_target_info { - __u32 strip_bmap[8]; + u_int32_t strip_bmap[8]; }; #endif /* _XT_TCPOPTSTRIP_H */ diff --git a/trunk/include/linux/netfilter/xt_TPROXY.h b/trunk/include/linux/netfilter/xt_TPROXY.h index 902043c2073f..3f3d69361289 100644 --- a/trunk/include/linux/netfilter/xt_TPROXY.h +++ b/trunk/include/linux/netfilter/xt_TPROXY.h @@ -1,21 +1,19 @@ #ifndef _XT_TPROXY_H #define _XT_TPROXY_H -#include - /* TPROXY target is capable of marking the packet to perform * redirection. We can get rid of that whenever we get support for * mutliple targets in the same rule. 
*/ struct xt_tproxy_target_info { - __u32 mark_mask; - __u32 mark_value; + u_int32_t mark_mask; + u_int32_t mark_value; __be32 laddr; __be16 lport; }; struct xt_tproxy_target_info_v1 { - __u32 mark_mask; - __u32 mark_value; + u_int32_t mark_mask; + u_int32_t mark_value; union nf_inet_addr laddr; __be16 lport; }; diff --git a/trunk/include/linux/netfilter/xt_cluster.h b/trunk/include/linux/netfilter/xt_cluster.h index 9b883c8fbf54..886682656f09 100644 --- a/trunk/include/linux/netfilter/xt_cluster.h +++ b/trunk/include/linux/netfilter/xt_cluster.h @@ -1,17 +1,15 @@ #ifndef _XT_CLUSTER_MATCH_H #define _XT_CLUSTER_MATCH_H -#include - enum xt_cluster_flags { XT_CLUSTER_F_INV = (1 << 0) }; struct xt_cluster_match_info { - __u32 total_nodes; - __u32 node_mask; - __u32 hash_seed; - __u32 flags; + u_int32_t total_nodes; + u_int32_t node_mask; + u_int32_t hash_seed; + u_int32_t flags; }; #define XT_CLUSTER_NODES_MAX 32 diff --git a/trunk/include/linux/netfilter/xt_comment.h b/trunk/include/linux/netfilter/xt_comment.h index 0ea5e79f5bd7..eacfedc6b5d0 100644 --- a/trunk/include/linux/netfilter/xt_comment.h +++ b/trunk/include/linux/netfilter/xt_comment.h @@ -4,7 +4,7 @@ #define XT_MAX_COMMENT_LEN 256 struct xt_comment_info { - char comment[XT_MAX_COMMENT_LEN]; + unsigned char comment[XT_MAX_COMMENT_LEN]; }; #endif /* XT_COMMENT_H */ diff --git a/trunk/include/linux/netfilter/xt_connlimit.h b/trunk/include/linux/netfilter/xt_connlimit.h index 0ca66e97acbc..7e3284bcbd2b 100644 --- a/trunk/include/linux/netfilter/xt_connlimit.h +++ b/trunk/include/linux/netfilter/xt_connlimit.h @@ -1,15 +1,8 @@ #ifndef _XT_CONNLIMIT_H #define _XT_CONNLIMIT_H -#include - struct xt_connlimit_data; -enum { - XT_CONNLIMIT_INVERT = 1 << 0, - XT_CONNLIMIT_DADDR = 1 << 1, -}; - struct xt_connlimit_info { union { union nf_inet_addr mask; @@ -20,14 +13,7 @@ struct xt_connlimit_info { }; #endif }; - unsigned int limit; - union { - /* revision 0 */ - unsigned int inverse; - - /* revision 1 */ - __u32 flags; - }; + unsigned int limit, inverse; /* Used internally by the kernel */ struct xt_connlimit_data *data __attribute__((aligned(8))); diff --git a/trunk/include/linux/netfilter/xt_conntrack.h b/trunk/include/linux/netfilter/xt_conntrack.h index 74b904d8f99c..54f47a2f6152 100644 --- a/trunk/include/linux/netfilter/xt_conntrack.h +++ b/trunk/include/linux/netfilter/xt_conntrack.h @@ -58,19 +58,4 @@ struct xt_conntrack_mtinfo2 { __u16 state_mask, status_mask; }; -struct xt_conntrack_mtinfo3 { - union nf_inet_addr origsrc_addr, origsrc_mask; - union nf_inet_addr origdst_addr, origdst_mask; - union nf_inet_addr replsrc_addr, replsrc_mask; - union nf_inet_addr repldst_addr, repldst_mask; - __u32 expires_min, expires_max; - __u16 l4proto; - __u16 origsrc_port, origdst_port; - __u16 replsrc_port, repldst_port; - __u16 match_flags, invert_flags; - __u16 state_mask, status_mask; - __u16 origsrc_port_high, origdst_port_high; - __u16 replsrc_port_high, repldst_port_high; -}; - #endif /*_XT_CONNTRACK_H*/ diff --git a/trunk/include/linux/netfilter/xt_quota.h b/trunk/include/linux/netfilter/xt_quota.h index ca6e03e47a17..b0d28c659ab7 100644 --- a/trunk/include/linux/netfilter/xt_quota.h +++ b/trunk/include/linux/netfilter/xt_quota.h @@ -1,8 +1,6 @@ #ifndef _XT_QUOTA_H #define _XT_QUOTA_H -#include - enum xt_quota_flags { XT_QUOTA_INVERT = 0x1, }; @@ -11,9 +9,9 @@ enum xt_quota_flags { struct xt_quota_priv; struct xt_quota_info { - __u32 flags; - __u32 pad; - aligned_u64 quota; + u_int32_t flags; + u_int32_t pad; + aligned_u64 quota; /* Used 
internally by the kernel */ struct xt_quota_priv *master; diff --git a/trunk/include/linux/netfilter/xt_socket.h b/trunk/include/linux/netfilter/xt_socket.h index 26d7217bd4f1..6f475b8ff34b 100644 --- a/trunk/include/linux/netfilter/xt_socket.h +++ b/trunk/include/linux/netfilter/xt_socket.h @@ -1,8 +1,6 @@ #ifndef _XT_SOCKET_H #define _XT_SOCKET_H -#include - enum { XT_SOCKET_TRANSPARENT = 1 << 0, }; diff --git a/trunk/include/linux/netfilter/xt_time.h b/trunk/include/linux/netfilter/xt_time.h index 7c37fac576c4..14b6df412c9f 100644 --- a/trunk/include/linux/netfilter/xt_time.h +++ b/trunk/include/linux/netfilter/xt_time.h @@ -1,16 +1,14 @@ #ifndef _XT_TIME_H #define _XT_TIME_H 1 -#include - struct xt_time_info { - __u32 date_start; - __u32 date_stop; - __u32 daytime_start; - __u32 daytime_stop; - __u32 monthdays_match; - __u8 weekdays_match; - __u8 flags; + u_int32_t date_start; + u_int32_t date_stop; + u_int32_t daytime_start; + u_int32_t daytime_stop; + u_int32_t monthdays_match; + u_int8_t weekdays_match; + u_int8_t flags; }; enum { diff --git a/trunk/include/linux/netfilter/xt_u32.h b/trunk/include/linux/netfilter/xt_u32.h index 04d1bfea03c2..9947f56cdbdd 100644 --- a/trunk/include/linux/netfilter/xt_u32.h +++ b/trunk/include/linux/netfilter/xt_u32.h @@ -1,8 +1,6 @@ #ifndef _XT_U32_H #define _XT_U32_H 1 -#include - enum xt_u32_ops { XT_U32_AND, XT_U32_LEFTSH, @@ -11,13 +9,13 @@ enum xt_u32_ops { }; struct xt_u32_location_element { - __u32 number; - __u8 nextop; + u_int32_t number; + u_int8_t nextop; }; struct xt_u32_value_element { - __u32 min; - __u32 max; + u_int32_t min; + u_int32_t max; }; /* @@ -29,14 +27,14 @@ struct xt_u32_value_element { struct xt_u32_test { struct xt_u32_location_element location[XT_U32_MAXSIZE+1]; struct xt_u32_value_element value[XT_U32_MAXSIZE+1]; - __u8 nnums; - __u8 nvalues; + u_int8_t nnums; + u_int8_t nvalues; }; struct xt_u32 { struct xt_u32_test tests[XT_U32_MAXSIZE+1]; - __u8 ntests; - __u8 invert; + u_int8_t ntests; + u_int8_t invert; }; #endif /* _XT_U32_H */ diff --git a/trunk/include/linux/netfilter_bridge/ebt_802_3.h b/trunk/include/linux/netfilter_bridge/ebt_802_3.h index be5be1577a56..c73ef0b18bdc 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_802_3.h +++ b/trunk/include/linux/netfilter_bridge/ebt_802_3.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_802_3_H #define __LINUX_BRIDGE_EBT_802_3_H -#include - #define EBT_802_3_SAP 0x01 #define EBT_802_3_TYPE 0x02 @@ -26,24 +24,24 @@ /* ui has one byte ctrl, ni has two */ struct hdr_ui { - __u8 dsap; - __u8 ssap; - __u8 ctrl; - __u8 orig[3]; + uint8_t dsap; + uint8_t ssap; + uint8_t ctrl; + uint8_t orig[3]; __be16 type; }; struct hdr_ni { - __u8 dsap; - __u8 ssap; + uint8_t dsap; + uint8_t ssap; __be16 ctrl; - __u8 orig[3]; + uint8_t orig[3]; __be16 type; }; struct ebt_802_3_hdr { - __u8 daddr[6]; - __u8 saddr[6]; + uint8_t daddr[6]; + uint8_t saddr[6]; __be16 len; union { struct hdr_ui ui; @@ -61,10 +59,10 @@ static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) #endif struct ebt_802_3_info { - __u8 sap; + uint8_t sap; __be16 type; - __u8 bitmask; - __u8 invflags; + uint8_t bitmask; + uint8_t invflags; }; #endif diff --git a/trunk/include/linux/netfilter_bridge/ebt_among.h b/trunk/include/linux/netfilter_bridge/ebt_among.h index bd4e3ad0b706..0009558609a7 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_among.h +++ b/trunk/include/linux/netfilter_bridge/ebt_among.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_AMONG_H #define __LINUX_BRIDGE_EBT_AMONG_H -#include - 
#define EBT_AMONG_DST 0x01 #define EBT_AMONG_SRC 0x02 @@ -32,7 +30,7 @@ */ struct ebt_mac_wormhash_tuple { - __u32 cmp[2]; + uint32_t cmp[2]; __be32 ip; }; diff --git a/trunk/include/linux/netfilter_bridge/ebt_arp.h b/trunk/include/linux/netfilter_bridge/ebt_arp.h index 522f3e427f49..cbf4843b6b0f 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_arp.h +++ b/trunk/include/linux/netfilter_bridge/ebt_arp.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_ARP_H #define __LINUX_BRIDGE_EBT_ARP_H -#include - #define EBT_ARP_OPCODE 0x01 #define EBT_ARP_HTYPE 0x02 #define EBT_ARP_PTYPE 0x04 @@ -29,8 +27,8 @@ struct ebt_arp_info unsigned char smmsk[ETH_ALEN]; unsigned char dmaddr[ETH_ALEN]; unsigned char dmmsk[ETH_ALEN]; - __u8 bitmask; - __u8 invflags; + uint8_t bitmask; + uint8_t invflags; }; #endif diff --git a/trunk/include/linux/netfilter_bridge/ebt_ip.h b/trunk/include/linux/netfilter_bridge/ebt_ip.h index c4bbc41b0ea4..6a708fb92241 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_ip.h +++ b/trunk/include/linux/netfilter_bridge/ebt_ip.h @@ -15,8 +15,6 @@ #ifndef __LINUX_BRIDGE_EBT_IP_H #define __LINUX_BRIDGE_EBT_IP_H -#include - #define EBT_IP_SOURCE 0x01 #define EBT_IP_DEST 0x02 #define EBT_IP_TOS 0x04 @@ -33,12 +31,12 @@ struct ebt_ip_info { __be32 daddr; __be32 smsk; __be32 dmsk; - __u8 tos; - __u8 protocol; - __u8 bitmask; - __u8 invflags; - __u16 sport[2]; - __u16 dport[2]; + uint8_t tos; + uint8_t protocol; + uint8_t bitmask; + uint8_t invflags; + uint16_t sport[2]; + uint16_t dport[2]; }; #endif diff --git a/trunk/include/linux/netfilter_bridge/ebt_ip6.h b/trunk/include/linux/netfilter_bridge/ebt_ip6.h index 42b889682721..e5de98701519 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_ip6.h +++ b/trunk/include/linux/netfilter_bridge/ebt_ip6.h @@ -12,19 +12,14 @@ #ifndef __LINUX_BRIDGE_EBT_IP6_H #define __LINUX_BRIDGE_EBT_IP6_H -#include - #define EBT_IP6_SOURCE 0x01 #define EBT_IP6_DEST 0x02 #define EBT_IP6_TCLASS 0x04 #define EBT_IP6_PROTO 0x08 #define EBT_IP6_SPORT 0x10 #define EBT_IP6_DPORT 0x20 -#define EBT_IP6_ICMP6 0x40 - #define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\ - EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \ - EBT_IP6_ICMP6) + EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT) #define EBT_IP6_MATCH "ip6" /* the same values are used for the invflags */ @@ -33,18 +28,12 @@ struct ebt_ip6_info { struct in6_addr daddr; struct in6_addr smsk; struct in6_addr dmsk; - __u8 tclass; - __u8 protocol; - __u8 bitmask; - __u8 invflags; - union { - __u16 sport[2]; - __u8 icmpv6_type[2]; - }; - union { - __u16 dport[2]; - __u8 icmpv6_code[2]; - }; + uint8_t tclass; + uint8_t protocol; + uint8_t bitmask; + uint8_t invflags; + uint16_t sport[2]; + uint16_t dport[2]; }; #endif diff --git a/trunk/include/linux/netfilter_bridge/ebt_limit.h b/trunk/include/linux/netfilter_bridge/ebt_limit.h index 66d80b30ba0e..4bf76b751676 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_limit.h +++ b/trunk/include/linux/netfilter_bridge/ebt_limit.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_LIMIT_H #define __LINUX_BRIDGE_EBT_LIMIT_H -#include - #define EBT_LIMIT_MATCH "limit" /* timings are in milliseconds. */ @@ -12,13 +10,13 @@ seconds, or one every 59 hours. */ struct ebt_limit_info { - __u32 avg; /* Average secs between packets * scale */ - __u32 burst; /* Period multiplier for upper limit. */ + u_int32_t avg; /* Average secs between packets * scale */ + u_int32_t burst; /* Period multiplier for upper limit. 
*/ /* Used internally by the kernel */ unsigned long prev; - __u32 credit; - __u32 credit_cap, cost; + u_int32_t credit; + u_int32_t credit_cap, cost; }; #endif diff --git a/trunk/include/linux/netfilter_bridge/ebt_log.h b/trunk/include/linux/netfilter_bridge/ebt_log.h index 7e7f1d1fe494..cc2cdfb764bc 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_log.h +++ b/trunk/include/linux/netfilter_bridge/ebt_log.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_LOG_H #define __LINUX_BRIDGE_EBT_LOG_H -#include - #define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */ #define EBT_LOG_ARP 0x02 #define EBT_LOG_NFLOG 0x04 @@ -12,9 +10,9 @@ #define EBT_LOG_WATCHER "log" struct ebt_log_info { - __u8 loglevel; - __u8 prefix[EBT_LOG_PREFIX_SIZE]; - __u32 bitmask; + uint8_t loglevel; + uint8_t prefix[EBT_LOG_PREFIX_SIZE]; + uint32_t bitmask; }; #endif diff --git a/trunk/include/linux/netfilter_bridge/ebt_mark_m.h b/trunk/include/linux/netfilter_bridge/ebt_mark_m.h index 410f9e5a71d4..9ceb10ec0ed6 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_mark_m.h +++ b/trunk/include/linux/netfilter_bridge/ebt_mark_m.h @@ -1,15 +1,13 @@ #ifndef __LINUX_BRIDGE_EBT_MARK_M_H #define __LINUX_BRIDGE_EBT_MARK_M_H -#include - #define EBT_MARK_AND 0x01 #define EBT_MARK_OR 0x02 #define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR) struct ebt_mark_m_info { unsigned long mark, mask; - __u8 invert; - __u8 bitmask; + uint8_t invert; + uint8_t bitmask; }; #define EBT_MARK_MATCH "mark_m" diff --git a/trunk/include/linux/netfilter_bridge/ebt_nflog.h b/trunk/include/linux/netfilter_bridge/ebt_nflog.h index df829fce9125..052817849b83 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_nflog.h +++ b/trunk/include/linux/netfilter_bridge/ebt_nflog.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_NFLOG_H #define __LINUX_BRIDGE_EBT_NFLOG_H -#include - #define EBT_NFLOG_MASK 0x0 #define EBT_NFLOG_PREFIX_SIZE 64 @@ -12,11 +10,11 @@ #define EBT_NFLOG_DEFAULT_THRESHOLD 1 struct ebt_nflog_info { - __u32 len; - __u16 group; - __u16 threshold; - __u16 flags; - __u16 pad; + u_int32_t len; + u_int16_t group; + u_int16_t threshold; + u_int16_t flags; + u_int16_t pad; char prefix[EBT_NFLOG_PREFIX_SIZE]; }; diff --git a/trunk/include/linux/netfilter_bridge/ebt_pkttype.h b/trunk/include/linux/netfilter_bridge/ebt_pkttype.h index c241badcd036..51a799840931 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_pkttype.h +++ b/trunk/include/linux/netfilter_bridge/ebt_pkttype.h @@ -1,11 +1,9 @@ #ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H #define __LINUX_BRIDGE_EBT_PKTTYPE_H -#include - struct ebt_pkttype_info { - __u8 pkt_type; - __u8 invert; + uint8_t pkt_type; + uint8_t invert; }; #define EBT_PKTTYPE_MATCH "pkttype" diff --git a/trunk/include/linux/netfilter_bridge/ebt_stp.h b/trunk/include/linux/netfilter_bridge/ebt_stp.h index 1025b9f5fb7d..e503a0aa2728 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_stp.h +++ b/trunk/include/linux/netfilter_bridge/ebt_stp.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_STP_H #define __LINUX_BRIDGE_EBT_STP_H -#include - #define EBT_STP_TYPE 0x0001 #define EBT_STP_FLAGS 0x0002 @@ -23,24 +21,24 @@ #define EBT_STP_MATCH "stp" struct ebt_stp_config_info { - __u8 flags; - __u16 root_priol, root_priou; + uint8_t flags; + uint16_t root_priol, root_priou; char root_addr[6], root_addrmsk[6]; - __u32 root_costl, root_costu; - __u16 sender_priol, sender_priou; + uint32_t root_costl, root_costu; + uint16_t sender_priol, sender_priou; char sender_addr[6], sender_addrmsk[6]; - __u16 portl, portu; - __u16 
msg_agel, msg_ageu; - __u16 max_agel, max_ageu; - __u16 hello_timel, hello_timeu; - __u16 forward_delayl, forward_delayu; + uint16_t portl, portu; + uint16_t msg_agel, msg_ageu; + uint16_t max_agel, max_ageu; + uint16_t hello_timel, hello_timeu; + uint16_t forward_delayl, forward_delayu; }; struct ebt_stp_info { - __u8 type; + uint8_t type; struct ebt_stp_config_info config; - __u16 bitmask; - __u16 invflags; + uint16_t bitmask; + uint16_t invflags; }; #endif diff --git a/trunk/include/linux/netfilter_bridge/ebt_ulog.h b/trunk/include/linux/netfilter_bridge/ebt_ulog.h index 89a6becb5269..b677e2671541 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_ulog.h +++ b/trunk/include/linux/netfilter_bridge/ebt_ulog.h @@ -1,8 +1,6 @@ #ifndef _EBT_ULOG_H #define _EBT_ULOG_H -#include - #define EBT_ULOG_DEFAULT_NLGROUP 0 #define EBT_ULOG_DEFAULT_QTHRESHOLD 1 #define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */ @@ -12,7 +10,7 @@ #define EBT_ULOG_VERSION 1 struct ebt_ulog_info { - __u32 nlgroup; + uint32_t nlgroup; unsigned int cprange; unsigned int qthreshold; char prefix[EBT_ULOG_PREFIX_LEN]; diff --git a/trunk/include/linux/netfilter_bridge/ebt_vlan.h b/trunk/include/linux/netfilter_bridge/ebt_vlan.h index 967d1d5cf98d..1d98be4031e7 100644 --- a/trunk/include/linux/netfilter_bridge/ebt_vlan.h +++ b/trunk/include/linux/netfilter_bridge/ebt_vlan.h @@ -1,8 +1,6 @@ #ifndef __LINUX_BRIDGE_EBT_VLAN_H #define __LINUX_BRIDGE_EBT_VLAN_H -#include - #define EBT_VLAN_ID 0x01 #define EBT_VLAN_PRIO 0x02 #define EBT_VLAN_ENCAP 0x04 @@ -10,12 +8,12 @@ #define EBT_VLAN_MATCH "vlan" struct ebt_vlan_info { - __u16 id; /* VLAN ID {1-4095} */ - __u8 prio; /* VLAN User Priority {0-7} */ + uint16_t id; /* VLAN ID {1-4095} */ + uint8_t prio; /* VLAN User Priority {0-7} */ __be16 encap; /* VLAN Encapsulated frame code {0-65535} */ - __u8 bitmask; /* Args bitmask bit 1=1 - ID arg, + uint8_t bitmask; /* Args bitmask bit 1=1 - ID arg, bit 2=1 User-Priority arg, bit 3=1 encap*/ - __u8 invflags; /* Inverse bitmask bit 1=1 - inversed ID arg, + uint8_t invflags; /* Inverse bitmask bit 1=1 - inversed ID arg, bit 2=1 - inversed Pirority arg */ }; diff --git a/trunk/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/trunk/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h index c6a204c97047..e5a3687c8a72 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h @@ -1,8 +1,6 @@ #ifndef _IPT_CLUSTERIP_H_target #define _IPT_CLUSTERIP_H_target -#include - enum clusterip_hashmode { CLUSTERIP_HASHMODE_SIP = 0, CLUSTERIP_HASHMODE_SIP_SPT, @@ -19,15 +17,15 @@ struct clusterip_config; struct ipt_clusterip_tgt_info { - __u32 flags; + u_int32_t flags; /* only relevant for new ones */ - __u8 clustermac[6]; - __u16 num_total_nodes; - __u16 num_local_nodes; - __u16 local_nodes[CLUSTERIP_MAX_NODES]; - __u32 hash_mode; - __u32 hash_initval; + u_int8_t clustermac[6]; + u_int16_t num_total_nodes; + u_int16_t num_local_nodes; + u_int16_t local_nodes[CLUSTERIP_MAX_NODES]; + u_int32_t hash_mode; + u_int32_t hash_initval; /* Used internally by the kernel */ struct clusterip_config *config; diff --git a/trunk/include/linux/netfilter_ipv4/ipt_ECN.h b/trunk/include/linux/netfilter_ipv4/ipt_ECN.h index bb88d5315a4d..7ca45918ab8e 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_ECN.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_ECN.h @@ -8,8 +8,6 @@ */ #ifndef _IPT_ECN_TARGET_H #define _IPT_ECN_TARGET_H - -#include #include #define IPT_ECN_IP_MASK (~XT_DSCP_MASK) @@ -21,11 +19,11 @@ #define 
IPT_ECN_OP_MASK 0xce struct ipt_ECN_info { - __u8 operation; /* bitset of operations */ - __u8 ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */ + u_int8_t operation; /* bitset of operations */ + u_int8_t ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */ union { struct { - __u8 ece:1, cwr:1; /* TCP ECT bits */ + u_int8_t ece:1, cwr:1; /* TCP ECT bits */ } tcp; } proto; }; diff --git a/trunk/include/linux/netfilter_ipv4/ipt_SAME.h b/trunk/include/linux/netfilter_ipv4/ipt_SAME.h index 5bca78267afd..2529660c5b38 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_SAME.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_SAME.h @@ -1,17 +1,15 @@ #ifndef _IPT_SAME_H #define _IPT_SAME_H -#include - #define IPT_SAME_MAX_RANGE 10 #define IPT_SAME_NODST 0x01 struct ipt_same_info { unsigned char info; - __u32 rangesize; - __u32 ipnum; - __u32 *iparray; + u_int32_t rangesize; + u_int32_t ipnum; + u_int32_t *iparray; /* hangs off end. */ struct nf_nat_range range[IPT_SAME_MAX_RANGE]; diff --git a/trunk/include/linux/netfilter_ipv4/ipt_TTL.h b/trunk/include/linux/netfilter_ipv4/ipt_TTL.h index f6ac169d92f9..ee6611edc112 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_TTL.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_TTL.h @@ -4,8 +4,6 @@ #ifndef _IPT_TTL_H #define _IPT_TTL_H -#include - enum { IPT_TTL_SET = 0, IPT_TTL_INC, @@ -15,8 +13,8 @@ enum { #define IPT_TTL_MAXMODE IPT_TTL_DEC struct ipt_TTL_info { - __u8 mode; - __u8 ttl; + u_int8_t mode; + u_int8_t ttl; }; diff --git a/trunk/include/linux/netfilter_ipv4/ipt_addrtype.h b/trunk/include/linux/netfilter_ipv4/ipt_addrtype.h index 0da42237c8da..446de6aef983 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_addrtype.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_addrtype.h @@ -1,8 +1,6 @@ #ifndef _IPT_ADDRTYPE_H #define _IPT_ADDRTYPE_H -#include - enum { IPT_ADDRTYPE_INVERT_SOURCE = 0x0001, IPT_ADDRTYPE_INVERT_DEST = 0x0002, @@ -11,17 +9,17 @@ enum { }; struct ipt_addrtype_info_v1 { - __u16 source; /* source-type mask */ - __u16 dest; /* dest-type mask */ - __u32 flags; + u_int16_t source; /* source-type mask */ + u_int16_t dest; /* dest-type mask */ + u_int32_t flags; }; /* revision 0 */ struct ipt_addrtype_info { - __u16 source; /* source-type mask */ - __u16 dest; /* dest-type mask */ - __u32 invert_source; - __u32 invert_dest; + u_int16_t source; /* source-type mask */ + u_int16_t dest; /* dest-type mask */ + u_int32_t invert_source; + u_int32_t invert_dest; }; #endif diff --git a/trunk/include/linux/netfilter_ipv4/ipt_ah.h b/trunk/include/linux/netfilter_ipv4/ipt_ah.h index 4e02bb0119e3..2e555b4d05e3 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_ah.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_ah.h @@ -1,11 +1,9 @@ #ifndef _IPT_AH_H #define _IPT_AH_H -#include - struct ipt_ah { - __u32 spis[2]; /* Security Parameter Index */ - __u8 invflags; /* Inverse flags */ + u_int32_t spis[2]; /* Security Parameter Index */ + u_int8_t invflags; /* Inverse flags */ }; diff --git a/trunk/include/linux/netfilter_ipv4/ipt_ecn.h b/trunk/include/linux/netfilter_ipv4/ipt_ecn.h index eabf95fb7d3e..9945baa4ccd7 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_ecn.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_ecn.h @@ -8,8 +8,6 @@ */ #ifndef _IPT_ECN_H #define _IPT_ECN_H - -#include #include #define IPT_ECN_IP_MASK (~XT_DSCP_MASK) @@ -22,12 +20,12 @@ /* match info */ struct ipt_ecn_info { - __u8 operation; - __u8 invert; - __u8 ip_ect; + u_int8_t operation; + u_int8_t invert; + u_int8_t ip_ect; union { struct { - __u8 ect; + u_int8_t ect; } 
tcp; } proto; }; diff --git a/trunk/include/linux/netfilter_ipv4/ipt_ttl.h b/trunk/include/linux/netfilter_ipv4/ipt_ttl.h index 37bee4442486..ee24fd86a3aa 100644 --- a/trunk/include/linux/netfilter_ipv4/ipt_ttl.h +++ b/trunk/include/linux/netfilter_ipv4/ipt_ttl.h @@ -4,8 +4,6 @@ #ifndef _IPT_TTL_H #define _IPT_TTL_H -#include - enum { IPT_TTL_EQ = 0, /* equals */ IPT_TTL_NE, /* not equals */ @@ -15,8 +13,8 @@ enum { struct ipt_ttl_info { - __u8 mode; - __u8 ttl; + u_int8_t mode; + u_int8_t ttl; }; diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_HL.h b/trunk/include/linux/netfilter_ipv6/ip6t_HL.h index ebd8ead1bb63..afb7813d45ab 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_HL.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_HL.h @@ -5,8 +5,6 @@ #ifndef _IP6T_HL_H #define _IP6T_HL_H -#include - enum { IP6T_HL_SET = 0, IP6T_HL_INC, @@ -16,8 +14,8 @@ enum { #define IP6T_HL_MAXMODE IP6T_HL_DEC struct ip6t_HL_info { - __u8 mode; - __u8 hop_limit; + u_int8_t mode; + u_int8_t hop_limit; }; diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_REJECT.h b/trunk/include/linux/netfilter_ipv6/ip6t_REJECT.h index 205ed62e4605..6be6504162bb 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_REJECT.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_REJECT.h @@ -1,8 +1,6 @@ #ifndef _IP6T_REJECT_H #define _IP6T_REJECT_H -#include - enum ip6t_reject_with { IP6T_ICMP6_NO_ROUTE, IP6T_ICMP6_ADM_PROHIBITED, @@ -14,7 +12,7 @@ enum ip6t_reject_with { }; struct ip6t_reject_info { - __u32 with; /* reject type */ + u_int32_t with; /* reject type */ }; #endif /*_IP6T_REJECT_H*/ diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_ah.h b/trunk/include/linux/netfilter_ipv6/ip6t_ah.h index 5da2b65cb3ad..17a745cfb2c7 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_ah.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_ah.h @@ -1,13 +1,11 @@ #ifndef _IP6T_AH_H #define _IP6T_AH_H -#include - struct ip6t_ah { - __u32 spis[2]; /* Security Parameter Index */ - __u32 hdrlen; /* Header Length */ - __u8 hdrres; /* Test of the Reserved Filed */ - __u8 invflags; /* Inverse flags */ + u_int32_t spis[2]; /* Security Parameter Index */ + u_int32_t hdrlen; /* Header Length */ + u_int8_t hdrres; /* Test of the Reserved Filed */ + u_int8_t invflags; /* Inverse flags */ }; #define IP6T_AH_SPI 0x01 diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_frag.h b/trunk/include/linux/netfilter_ipv6/ip6t_frag.h index b47f61b9e082..3724d0850920 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_frag.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_frag.h @@ -1,13 +1,11 @@ #ifndef _IP6T_FRAG_H #define _IP6T_FRAG_H -#include - struct ip6t_frag { - __u32 ids[2]; /* Security Parameter Index */ - __u32 hdrlen; /* Header Length */ - __u8 flags; /* */ - __u8 invflags; /* Inverse flags */ + u_int32_t ids[2]; /* Security Parameter Index */ + u_int32_t hdrlen; /* Header Length */ + u_int8_t flags; /* */ + u_int8_t invflags; /* Inverse flags */ }; #define IP6T_FRAG_IDS 0x01 diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_hl.h b/trunk/include/linux/netfilter_ipv6/ip6t_hl.h index 6e76dbc6c19a..5ef91b8319a8 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_hl.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_hl.h @@ -5,8 +5,6 @@ #ifndef _IP6T_HL_H #define _IP6T_HL_H -#include - enum { IP6T_HL_EQ = 0, /* equals */ IP6T_HL_NE, /* not equals */ @@ -16,8 +14,8 @@ enum { struct ip6t_hl_info { - __u8 mode; - __u8 hop_limit; + u_int8_t mode; + u_int8_t hop_limit; }; diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_ipv6header.h 
b/trunk/include/linux/netfilter_ipv6/ip6t_ipv6header.h index efae3a20c214..01dfd445596a 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_ipv6header.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_ipv6header.h @@ -8,12 +8,10 @@ on whether they contain certain headers */ #ifndef __IPV6HEADER_H #define __IPV6HEADER_H -#include - struct ip6t_ipv6header_info { - __u8 matchflags; - __u8 invflags; - __u8 modeflag; + u_int8_t matchflags; + u_int8_t invflags; + u_int8_t modeflag; }; #define MASK_HOPOPTS 128 diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_mh.h b/trunk/include/linux/netfilter_ipv6/ip6t_mh.h index a7729a5025cd..18549bca2d1f 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_mh.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_mh.h @@ -1,12 +1,10 @@ #ifndef _IP6T_MH_H #define _IP6T_MH_H -#include - /* MH matching stuff */ struct ip6t_mh { - __u8 types[2]; /* MH type range */ - __u8 invflags; /* Inverse flags */ + u_int8_t types[2]; /* MH type range */ + u_int8_t invflags; /* Inverse flags */ }; /* Values for "invflags" field in struct ip6t_mh. */ diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_opts.h b/trunk/include/linux/netfilter_ipv6/ip6t_opts.h index 17d419a811fd..62d89bcd9f9c 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_opts.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_opts.h @@ -1,16 +1,14 @@ #ifndef _IP6T_OPTS_H #define _IP6T_OPTS_H -#include - #define IP6T_OPTS_OPTSNR 16 struct ip6t_opts { - __u32 hdrlen; /* Header Length */ - __u8 flags; /* */ - __u8 invflags; /* Inverse flags */ - __u16 opts[IP6T_OPTS_OPTSNR]; /* opts */ - __u8 optsnr; /* Nr of OPts */ + u_int32_t hdrlen; /* Header Length */ + u_int8_t flags; /* */ + u_int8_t invflags; /* Inverse flags */ + u_int16_t opts[IP6T_OPTS_OPTSNR]; /* opts */ + u_int8_t optsnr; /* Nr of OPts */ }; #define IP6T_OPTS_LEN 0x01 diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_rt.h b/trunk/include/linux/netfilter_ipv6/ip6t_rt.h index 7605a5ff81cd..ab91bfd2cd00 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_rt.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_rt.h @@ -1,19 +1,18 @@ #ifndef _IP6T_RT_H #define _IP6T_RT_H -#include /*#include */ #define IP6T_RT_HOPS 16 struct ip6t_rt { - __u32 rt_type; /* Routing Type */ - __u32 segsleft[2]; /* Segments Left */ - __u32 hdrlen; /* Header Length */ - __u8 flags; /* */ - __u8 invflags; /* Inverse flags */ + u_int32_t rt_type; /* Routing Type */ + u_int32_t segsleft[2]; /* Segments Left */ + u_int32_t hdrlen; /* Header Length */ + u_int8_t flags; /* */ + u_int8_t invflags; /* Inverse flags */ struct in6_addr addrs[IP6T_RT_HOPS]; /* Hops */ - __u8 addrnr; /* Nr of Addresses */ + u_int8_t addrnr; /* Nr of Addresses */ }; #define IP6T_RT_TYP 0x01 diff --git a/trunk/include/linux/pkt_sched.h b/trunk/include/linux/pkt_sched.h index 776cd93d5f7b..2cfa4bc8dea6 100644 --- a/trunk/include/linux/pkt_sched.h +++ b/trunk/include/linux/pkt_sched.h @@ -481,16 +481,4 @@ struct tc_drr_stats { __u32 deficit; }; -/* MQPRIO */ -#define TC_QOPT_BITMASK 15 -#define TC_QOPT_MAX_QUEUE 16 - -struct tc_mqprio_qopt { - __u8 num_tc; - __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; - __u8 hw; - __u16 count[TC_QOPT_MAX_QUEUE]; - __u16 offset[TC_QOPT_MAX_QUEUE]; -}; - #endif diff --git a/trunk/include/linux/rtc.h b/trunk/include/linux/rtc.h index a0b639f8e805..3c995b4d742c 100644 --- a/trunk/include/linux/rtc.h +++ b/trunk/include/linux/rtc.h @@ -235,6 +235,8 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq); extern int rtc_update_irq_enable(struct rtc_device 
*rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, + unsigned int enabled); void rtc_aie_update_irq(void *private); void rtc_uie_update_irq(void *private); @@ -244,6 +246,8 @@ int rtc_register(rtc_task_t *task); int rtc_unregister(rtc_task_t *task); int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); +void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer); +void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer); void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, ktime_t expires, ktime_t period); diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h index 31f02d0b46a7..bf221d65d9ad 100644 --- a/trunk/include/linux/skbuff.h +++ b/trunk/include/linux/skbuff.h @@ -1801,15 +1801,6 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \ skb = skb->prev) -#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ - for (skb = (queue)->prev, tmp = skb->prev; \ - skb != (struct sk_buff *)(queue); \ - skb = tmp, tmp = skb->prev) - -#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ - for (tmp = skb->prev; \ - skb != (struct sk_buff *)(queue); \ - skb = tmp, tmp = skb->prev) static inline bool skb_has_frag_list(const struct sk_buff *skb) { @@ -1877,7 +1868,7 @@ extern void skb_split(struct sk_buff *skb, extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); -extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features); +extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) diff --git a/trunk/include/linux/virtio_config.h b/trunk/include/linux/virtio_config.h index 800617b4ddd5..0093dd7c1d6f 100644 --- a/trunk/include/linux/virtio_config.h +++ b/trunk/include/linux/virtio_config.h @@ -109,10 +109,7 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? 
*/ - if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 32); - else - BUG_ON(fbit >= 32); + MAYBE_BUILD_BUG_ON(fbit >= 32); if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); diff --git a/trunk/include/net/bluetooth/hci_core.h b/trunk/include/net/bluetooth/hci_core.h index d2cf88407690..a29feb01854e 100644 --- a/trunk/include/net/bluetooth/hci_core.h +++ b/trunk/include/net/bluetooth/hci_core.h @@ -184,7 +184,6 @@ struct hci_conn { __u32 link_mode; __u8 auth_type; __u8 sec_level; - __u8 pending_sec_level; __u8 power_save; __u16 disc_timeout; unsigned long pend; diff --git a/trunk/include/net/cfg80211.h b/trunk/include/net/cfg80211.h index 679a0494b5f2..1322695beb52 100644 --- a/trunk/include/net/cfg80211.h +++ b/trunk/include/net/cfg80211.h @@ -1790,9 +1790,8 @@ static inline void *wdev_priv(struct wireless_dev *wdev) /** * ieee80211_channel_to_frequency - convert channel number to frequency * @chan: channel number - * @band: band, necessary due to channel number overlap */ -extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band); +extern int ieee80211_channel_to_frequency(int chan); /** * ieee80211_frequency_to_channel - convert frequency to channel number diff --git a/trunk/include/net/dst.h b/trunk/include/net/dst.h index 484f80b69ada..93b0310317be 100644 --- a/trunk/include/net/dst.h +++ b/trunk/include/net/dst.h @@ -40,10 +40,24 @@ struct dst_entry { struct rcu_head rcu_head; struct dst_entry *child; struct net_device *dev; - struct dst_ops *ops; - unsigned long _metrics; + short error; + short obsolete; + int flags; +#define DST_HOST 0x0001 +#define DST_NOXFRM 0x0002 +#define DST_NOPOLICY 0x0004 +#define DST_NOHASH 0x0008 +#define DST_NOCACHE 0x0010 unsigned long expires; + + unsigned short header_len; /* more space at head required */ + unsigned short trailer_len; /* space to reserve at tail */ + + unsigned int rate_tokens; + unsigned long rate_last; /* rate limiting for ICMP */ + struct dst_entry *path; + struct neighbour *neighbour; struct hh_cache *hh; #ifdef CONFIG_XFRM @@ -54,16 +68,17 @@ struct dst_entry { int (*input)(struct sk_buff*); int (*output)(struct sk_buff*); - short error; - short obsolete; - unsigned short header_len; /* more space at head required */ - unsigned short trailer_len; /* space to reserve at tail */ -#ifdef CONFIG_IP_ROUTE_CLASSID + struct dst_ops *ops; + + u32 _metrics[RTAX_MAX]; + +#ifdef CONFIG_NET_CLS_ROUTE __u32 tclassid; #else __u32 __pad2; #endif + /* * Align __refcnt to a 64 bytes alignment * (L1_CACHE_SIZE would be too much) @@ -78,14 +93,6 @@ struct dst_entry { atomic_t __refcnt; /* client references */ int __use; unsigned long lastuse; - unsigned long rate_last; /* rate limiting for ICMP */ - unsigned int rate_tokens; - int flags; -#define DST_HOST 0x0001 -#define DST_NOXFRM 0x0002 -#define DST_NOPOLICY 0x0004 -#define DST_NOHASH 0x0008 -#define DST_NOCACHE 0x0010 union { struct dst_entry *next; struct rtable __rcu *rt_next; @@ -96,70 +103,10 @@ struct dst_entry { #ifdef __KERNEL__ -extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); -extern const u32 dst_default_metrics[RTAX_MAX]; - -#define DST_METRICS_READ_ONLY 0x1UL -#define __DST_METRICS_PTR(Y) \ - ((u32 *)((Y) & ~DST_METRICS_READ_ONLY)) -#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics) - -static inline bool dst_metrics_read_only(const struct dst_entry *dst) -{ - return dst->_metrics & DST_METRICS_READ_ONLY; -} - -extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long 
old); - -static inline void dst_destroy_metrics_generic(struct dst_entry *dst) -{ - unsigned long val = dst->_metrics; - if (!(val & DST_METRICS_READ_ONLY)) - __dst_destroy_metrics_generic(dst, val); -} - -static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst) -{ - unsigned long p = dst->_metrics; - - if (p & DST_METRICS_READ_ONLY) - return dst->ops->cow_metrics(dst, p); - return __DST_METRICS_PTR(p); -} - -/* This may only be invoked before the entry has reached global - * visibility. - */ -static inline void dst_init_metrics(struct dst_entry *dst, - const u32 *src_metrics, - bool read_only) -{ - dst->_metrics = ((unsigned long) src_metrics) | - (read_only ? DST_METRICS_READ_ONLY : 0); -} - -static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) -{ - u32 *dst_metrics = dst_metrics_write_ptr(dest); - - if (dst_metrics) { - u32 *src_metrics = DST_METRICS_PTR(src); - - memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); - } -} - -static inline u32 *dst_metrics_ptr(struct dst_entry *dst) -{ - return DST_METRICS_PTR(dst); -} - static inline u32 dst_metric_raw(const struct dst_entry *dst, const int metric) { - u32 *p = DST_METRICS_PTR(dst); - - return p[metric-1]; + return dst->_metrics[metric-1]; } static inline u32 @@ -184,10 +131,22 @@ dst_metric_advmss(const struct dst_entry *dst) static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val) { - u32 *p = dst_metrics_write_ptr(dst); + dst->_metrics[metric-1] = val; +} + +static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics) +{ + memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32)); +} - if (p) - p[metric-1] = val; +static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) +{ + dst_import_metrics(dest, src->_metrics); +} + +static inline u32 *dst_metrics_ptr(struct dst_entry *dst) +{ + return dst->_metrics; } static inline u32 diff --git a/trunk/include/net/dst_ops.h b/trunk/include/net/dst_ops.h index dc0746328947..21a320b8708e 100644 --- a/trunk/include/net/dst_ops.h +++ b/trunk/include/net/dst_ops.h @@ -18,7 +18,6 @@ struct dst_ops { struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*default_mtu)(const struct dst_entry *); - u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *dev, int how); diff --git a/trunk/include/net/flow.h b/trunk/include/net/flow.h index 1ae901f24436..240b7f356c71 100644 --- a/trunk/include/net/flow.h +++ b/trunk/include/net/flow.h @@ -48,8 +48,7 @@ struct flowi { __u8 proto; __u8 flags; -#define FLOWI_FLAG_ANYSRC 0x01 -#define FLOWI_FLAG_PRECOW_METRICS 0x02 +#define FLOWI_FLAG_ANYSRC 0x01 union { struct { __be16 sport; diff --git a/trunk/include/net/inet_sock.h b/trunk/include/net/inet_sock.h index 6e6dfd757682..8181498fa96c 100644 --- a/trunk/include/net/inet_sock.h +++ b/trunk/include/net/inet_sock.h @@ -219,13 +219,7 @@ static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { - __u8 flags = 0; - - if (inet_sk(sk)->transparent) - flags |= FLOWI_FLAG_ANYSRC; - if (sk->sk_protocol == IPPROTO_TCP) - flags |= FLOWI_FLAG_PRECOW_METRICS; - return flags; + return inet_sk(sk)->transparent ? 
FLOWI_FLAG_ANYSRC : 0; } #endif /* _INET_SOCK_H */ diff --git a/trunk/include/net/inetpeer.h b/trunk/include/net/inetpeer.h index 61f2c66edb2a..599d96e74114 100644 --- a/trunk/include/net/inetpeer.h +++ b/trunk/include/net/inetpeer.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include @@ -34,8 +33,8 @@ struct inet_peer { atomic_t refcnt; /* * Once inet_peer is queued for deletion (refcnt == -1), following fields - * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics - * We can share memory with rcu_head to help keep inet_peer small. + * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp + * We can share memory with rcu_head to keep inet_peer small */ union { struct { @@ -43,7 +42,6 @@ struct inet_peer { atomic_t ip_id_count; /* IP ID for the next packet */ __u32 tcp_ts; __u32 tcp_ts_stamp; - u32 metrics[RTAX_MAX]; }; struct rcu_head rcu; }; @@ -51,13 +49,6 @@ struct inet_peer { void inet_initpeers(void) __init; -#define INETPEER_METRICS_NEW (~(u32) 0) - -static inline bool inet_metrics_new(const struct inet_peer *p) -{ - return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW; -} - /* can be called with or without local BH being disabled */ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create); diff --git a/trunk/include/net/ip_fib.h b/trunk/include/net/ip_fib.h index 2c0508a6e07c..07bdb5e9e8ac 100644 --- a/trunk/include/net/ip_fib.h +++ b/trunk/include/net/ip_fib.h @@ -55,7 +55,7 @@ struct fib_nh { int nh_weight; int nh_power; #endif -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE __u32 nh_tclassid; #endif int nh_oif; @@ -77,7 +77,7 @@ struct fib_info { int fib_protocol; __be32 fib_prefsrc; u32 fib_priority; - u32 *fib_metrics; + u32 fib_metrics[RTAX_MAX]; #define fib_mtu fib_metrics[RTAX_MTU-1] #define fib_window fib_metrics[RTAX_WINDOW-1] #define fib_rtt fib_metrics[RTAX_RTT-1] @@ -201,7 +201,7 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp, extern int __net_init fib4_rules_init(struct net *net); extern void __net_exit fib4_rules_exit(struct net *net); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE extern u32 fib_rules_tclass(struct fib_result *res); #endif @@ -235,7 +235,7 @@ extern struct fib_table *fib_hash_table(u32 id); static inline void fib_combine_itag(u32 *itag, struct fib_result *res) { -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE #ifdef CONFIG_IP_MULTIPLE_TABLES u32 rtag; #endif diff --git a/trunk/include/net/ip_vs.h b/trunk/include/net/ip_vs.h index b23bea62f708..b7bbd6c28cfa 100644 --- a/trunk/include/net/ip_vs.h +++ b/trunk/include/net/ip_vs.h @@ -28,80 +28,6 @@ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include #endif -#include /* Netw namespace */ - -/* - * Generic access of ipvs struct - */ -static inline struct netns_ipvs *net_ipvs(struct net* net) -{ - return net->ipvs; -} -/* - * Get net ptr from skb in traffic cases - * use skb_sknet when call is from userland (ioctl or netlink) - */ -static inline struct net *skb_net(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_NS -#ifdef CONFIG_IP_VS_DEBUG - /* - * This is used for debug only. 
- * Start with the most likely hit - * End with BUG - */ - if (likely(skb->dev && skb->dev->nd_net)) - return dev_net(skb->dev); - if (skb_dst(skb)->dev) - return dev_net(skb_dst(skb)->dev); - WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", - __func__, __LINE__); - if (likely(skb->sk && skb->sk->sk_net)) - return sock_net(skb->sk); - pr_err("There is no net ptr to find in the skb in %s() line:%d\n", - __func__, __LINE__); - BUG(); -#else - return dev_net(skb->dev ? : skb_dst(skb)->dev); -#endif -#else - return &init_net; -#endif -} - -static inline struct net *skb_sknet(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_NS -#ifdef CONFIG_IP_VS_DEBUG - /* Start with the most likely hit */ - if (likely(skb->sk && skb->sk->sk_net)) - return sock_net(skb->sk); - WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n", - __func__, __LINE__); - if (likely(skb->dev && skb->dev->nd_net)) - return dev_net(skb->dev); - pr_err("There is no net ptr to find in the skb in %s() line:%d\n", - __func__, __LINE__); - BUG(); -#else - return sock_net(skb->sk); -#endif -#else - return &init_net; -#endif -} -/* - * This one needed for single_open_net since net is stored directly in - * private not as a struct i.e. seq_file_net cant be used. - */ -static inline struct net *seq_file_single_net(struct seq_file *seq) -{ -#ifdef CONFIG_NET_NS - return (struct net *)seq->private; -#else - return &init_net; -#endif -} /* Connections' size value needed by ip_vs_ctl.c */ extern int ip_vs_conn_tab_size; @@ -332,23 +258,6 @@ struct ip_vs_seq { before last resized pkt */ }; -/* - * counters per cpu - */ -struct ip_vs_counters { - __u32 conns; /* connections scheduled */ - __u32 inpkts; /* incoming packets */ - __u32 outpkts; /* outgoing packets */ - __u64 inbytes; /* incoming bytes */ - __u64 outbytes; /* outgoing bytes */ -}; -/* - * Stats per cpu - */ -struct ip_vs_cpu_stats { - struct ip_vs_counters ustats; - struct u64_stats_sync syncp; -}; /* * IPVS statistics objects @@ -370,34 +279,17 @@ struct ip_vs_estimator { }; struct ip_vs_stats { - struct ip_vs_stats_user ustats; /* statistics */ + struct ip_vs_stats_user ustats; /* statistics */ struct ip_vs_estimator est; /* estimator */ - struct ip_vs_cpu_stats *cpustats; /* per cpu counters */ - spinlock_t lock; /* spin lock */ -}; -/* - * Helper Macros for per cpu - * ipvs->tot_stats->ustats.count - */ -#define IPVS_STAT_INC(ipvs, count) \ - __this_cpu_inc((ipvs)->ustats->count) - -#define IPVS_STAT_ADD(ipvs, count, value) \ - do {\ - write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \ - raw_smp_processor_id())); \ - __this_cpu_add((ipvs)->ustats->count, value); \ - write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \ - raw_smp_processor_id())); \ - } while (0) + spinlock_t lock; /* spin lock */ +}; struct dst_entry; struct iphdr; struct ip_vs_conn; struct ip_vs_app; struct sk_buff; -struct ip_vs_proto_data; struct ip_vs_protocol { struct ip_vs_protocol *next; @@ -405,22 +297,21 @@ struct ip_vs_protocol { u16 protocol; u16 num_states; int dont_defrag; + atomic_t appcnt; /* counter of proto app incs */ + int *timeout_table; /* protocol timeout table */ void (*init)(struct ip_vs_protocol *pp); void (*exit)(struct ip_vs_protocol *pp); - void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); - - void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); - int (*conn_schedule)(int af, struct sk_buff *skb, - struct ip_vs_proto_data *pd, + struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp); struct ip_vs_conn 
* (*conn_in_get)(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse); @@ -428,6 +319,7 @@ struct ip_vs_protocol { struct ip_vs_conn * (*conn_out_get)(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse); @@ -445,11 +337,11 @@ struct ip_vs_protocol { int (*state_transition)(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, - struct ip_vs_proto_data *pd); + struct ip_vs_protocol *pp); - int (*register_app)(struct net *net, struct ip_vs_app *inc); + int (*register_app)(struct ip_vs_app *inc); - void (*unregister_app)(struct net *net, struct ip_vs_app *inc); + void (*unregister_app)(struct ip_vs_app *inc); int (*app_conn_bind)(struct ip_vs_conn *cp); @@ -458,26 +350,14 @@ struct ip_vs_protocol { int offset, const char *msg); - void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); -}; + void (*timeout_change)(struct ip_vs_protocol *pp, int flags); -/* - * protocol data per netns - */ -struct ip_vs_proto_data { - struct ip_vs_proto_data *next; - struct ip_vs_protocol *pp; - int *timeout_table; /* protocol timeout table */ - atomic_t appcnt; /* counter of proto app incs. */ - struct tcp_states_t *tcp_state_table; + int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to); }; -extern struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto); -extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net, - unsigned short proto); +extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto); struct ip_vs_conn_param { - struct net *net; const union nf_inet_addr *caddr; const union nf_inet_addr *vaddr; __be16 cport; @@ -495,19 +375,16 @@ struct ip_vs_conn_param { */ struct ip_vs_conn { struct list_head c_list; /* hashed list heads */ -#ifdef CONFIG_NET_NS - struct net *net; /* Name space */ -#endif + /* Protocol, addresses and port numbers */ - u16 af; /* address family */ - __be16 cport; - __be16 vport; - __be16 dport; - __u32 fwmark; /* Fire wall mark from skb */ - union nf_inet_addr caddr; /* client address */ - union nf_inet_addr vaddr; /* virtual address */ - union nf_inet_addr daddr; /* destination address */ - volatile __u32 flags; /* status flags */ + u16 af; /* address family */ + union nf_inet_addr caddr; /* client address */ + union nf_inet_addr vaddr; /* virtual address */ + union nf_inet_addr daddr; /* destination address */ + volatile __u32 flags; /* status flags */ + __be16 cport; + __be16 vport; + __be16 dport; __u16 protocol; /* Which protocol (TCP/UDP) */ /* counter and timer */ @@ -545,38 +422,10 @@ struct ip_vs_conn { struct ip_vs_seq in_seq; /* incoming seq. struct */ struct ip_vs_seq out_seq; /* outgoing seq. struct */ - const struct ip_vs_pe *pe; char *pe_data; __u8 pe_data_len; }; -/* - * To save some memory in conn table when name space is disabled. 
- */ -static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) -{ -#ifdef CONFIG_NET_NS - return cp->net; -#else - return &init_net; -#endif -} -static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) -{ -#ifdef CONFIG_NET_NS - cp->net = net; -#endif -} - -static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp, - struct net *net) -{ -#ifdef CONFIG_NET_NS - return cp->net == net; -#else - return 1; -#endif -} /* * Extended internal versions of struct ip_vs_service_user and @@ -636,7 +485,6 @@ struct ip_vs_service { unsigned flags; /* service status flags */ unsigned timeout; /* persistent timeout in ticks */ __be32 netmask; /* grouping granularity */ - struct net *net; struct list_head destinations; /* real server d-linked list */ __u32 num_dests; /* number of servers */ @@ -662,8 +510,8 @@ struct ip_vs_dest { struct list_head d_list; /* for table with all the dests */ u16 af; /* address family */ - __be16 port; /* port number of the server */ union nf_inet_addr addr; /* IP address of the server */ + __be16 port; /* port number of the server */ volatile unsigned flags; /* dest status flags */ atomic_t conn_flags; /* flags to copy to conn */ atomic_t weight; /* server weight */ @@ -690,8 +538,8 @@ struct ip_vs_dest { /* for virtual service */ struct ip_vs_service *svc; /* service it belongs to */ __u16 protocol; /* which protocol (TCP/UDP) */ - __be16 vport; /* virtual port number */ union nf_inet_addr vaddr; /* virtual IP address */ + __be16 vport; /* virtual port number */ __u32 vfwmark; /* firewall mark of service */ }; @@ -826,14 +674,13 @@ enum { IP_VS_DIR_LAST, }; -static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol, +static inline void ip_vs_conn_fill_param(int af, int protocol, const union nf_inet_addr *caddr, __be16 cport, const union nf_inet_addr *vaddr, __be16 vport, struct ip_vs_conn_param *p) { - p->net = net; p->af = af; p->protocol = protocol; p->caddr = caddr; @@ -848,6 +695,7 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p); struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p); struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse); @@ -855,6 +703,7 @@ struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse); @@ -870,14 +719,14 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, const union nf_inet_addr *daddr, __be16 dport, unsigned flags, - struct ip_vs_dest *dest, __u32 fwmark); + struct ip_vs_dest *dest); extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); extern const char * ip_vs_state_name(__u16 proto, int state); -extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp); +extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); extern int ip_vs_check_template(struct ip_vs_conn *ct); -extern void ip_vs_random_dropentry(struct net *net); +extern void ip_vs_random_dropentry(void); extern int ip_vs_conn_init(void); extern void ip_vs_conn_cleanup(void); @@ -947,12 +796,12 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) * (from ip_vs_app.c) 
*/ #define IP_VS_APP_MAX_PORTS 8 -extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app); -extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app); +extern int register_ip_vs_app(struct ip_vs_app *app); +extern void unregister_ip_vs_app(struct ip_vs_app *app); extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); extern void ip_vs_unbind_app(struct ip_vs_conn *cp); -extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, - __u16 proto, __u16 port); +extern int +register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port); extern int ip_vs_app_inc_get(struct ip_vs_app *inc); extern void ip_vs_app_inc_put(struct ip_vs_app *inc); @@ -965,27 +814,15 @@ void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe); void ip_vs_unbind_pe(struct ip_vs_service *svc); int register_ip_vs_pe(struct ip_vs_pe *pe); int unregister_ip_vs_pe(struct ip_vs_pe *pe); -struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); -struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); - -static inline void ip_vs_pe_get(const struct ip_vs_pe *pe) -{ - if (pe && pe->module) - __module_get(pe->module); -} - -static inline void ip_vs_pe_put(const struct ip_vs_pe *pe) -{ - if (pe && pe->module) - module_put(pe->module); -} +extern struct ip_vs_pe *ip_vs_pe_get(const char *name); +extern void ip_vs_pe_put(struct ip_vs_pe *pe); /* * IPVS protocol functions (from ip_vs_proto.c) */ extern int ip_vs_protocol_init(void); extern void ip_vs_protocol_cleanup(void); -extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); +extern void ip_vs_protocol_timeout_change(int flags); extern int *ip_vs_create_timeout_table(int *table, int size); extern int ip_vs_set_state_timeout(int *table, int num, const char *const *names, @@ -1015,21 +852,26 @@ extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name); extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); extern struct ip_vs_conn * ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, - struct ip_vs_proto_data *pd, int *ignored); + struct ip_vs_protocol *pp, int *ignored); extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, - struct ip_vs_proto_data *pd); + struct ip_vs_protocol *pp); /* * IPVS control data and functions (from ip_vs_ctl.c) */ +extern int sysctl_ip_vs_cache_bypass; +extern int sysctl_ip_vs_expire_nodest_conn; +extern int sysctl_ip_vs_expire_quiescent_template; +extern int sysctl_ip_vs_sync_threshold[2]; +extern int sysctl_ip_vs_nat_icmp_send; +extern int sysctl_ip_vs_conntrack; +extern int sysctl_ip_vs_snat_reroute; extern struct ip_vs_stats ip_vs_stats; extern const struct ctl_path net_vs_ctl_path[]; -extern int sysctl_ip_vs_sync_ver; -extern void ip_vs_sync_switch_mode(struct net *net, int mode); extern struct ip_vs_service * -ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol, +ip_vs_service_get(int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport); static inline void ip_vs_service_put(struct ip_vs_service *svc) @@ -1038,7 +880,7 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc) } extern struct ip_vs_dest * -ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol, +ip_vs_lookup_real_service(int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport); extern int ip_vs_use_count_inc(void); @@ -1046,9 +888,8 @@ extern void ip_vs_use_count_dec(void); extern int ip_vs_control_init(void); extern void 
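With the netns pointer dropped from ip_vs_conn_param in the hunks above, a connection lookup goes back to the pre-namespace form: fill the parameter block, then probe the global hash with it. A minimal sketch using only the helpers declared in this header; find_tcp_conn and the IPv4/TCP values are illustrative, not part of the patch:

	#include <linux/in.h>
	#include <net/ip_vs.h>

	static struct ip_vs_conn *find_tcp_conn(const union nf_inet_addr *caddr,
						__be16 cport,
						const union nf_inet_addr *vaddr,
						__be16 vport)
	{
		struct ip_vs_conn_param p;

		/* Describe the client->virtual tuple we are looking for. */
		ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP,
				      caddr, cport, vaddr, vport, &p);

		/*
		 * Returns the matching connection with a reference held,
		 * or NULL; callers drop it with ip_vs_conn_put() when done.
		 */
		return ip_vs_conn_in_get(&p);
	}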
ip_vs_control_cleanup(void); extern struct ip_vs_dest * -ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, - __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, - __u16 protocol, __u32 fwmark); +ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport, + const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol); extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); @@ -1056,12 +897,14 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); * IPVS sync daemon data and function prototypes * (from ip_vs_sync.c) */ -extern int start_sync_thread(struct net *net, int state, char *mcast_ifn, - __u8 syncid); -extern int stop_sync_thread(struct net *net, int state); -extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp); -extern int ip_vs_sync_init(void); -extern void ip_vs_sync_cleanup(void); +extern volatile int ip_vs_sync_state; +extern volatile int ip_vs_master_syncid; +extern volatile int ip_vs_backup_syncid; +extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; +extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; +extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid); +extern int stop_sync_thread(int state); +extern void ip_vs_sync_conn(struct ip_vs_conn *cp); /* @@ -1069,8 +912,8 @@ extern void ip_vs_sync_cleanup(void); */ extern int ip_vs_estimator_init(void); extern void ip_vs_estimator_cleanup(void); -extern void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats); -extern void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats); +extern void ip_vs_new_estimator(struct ip_vs_stats *stats); +extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); /* @@ -1112,13 +955,11 @@ extern int ip_vs_icmp_xmit_v6 extern int ip_vs_drop_rate; extern int ip_vs_drop_counter; -static inline int ip_vs_todrop(struct netns_ipvs *ipvs) +static __inline__ int ip_vs_todrop(void) { - if (!ipvs->drop_rate) - return 0; - if (--ipvs->drop_counter > 0) - return 0; - ipvs->drop_counter = ipvs->drop_rate; + if (!ip_vs_drop_rate) return 0; + if (--ip_vs_drop_counter > 0) return 0; + ip_vs_drop_counter = ip_vs_drop_rate; return 1; } @@ -1206,9 +1047,9 @@ static inline void ip_vs_notrack(struct sk_buff *skb) * Netfilter connection tracking * (from ip_vs_nfct.c) */ -static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) +static inline int ip_vs_conntrack_enabled(void) { - return ipvs->sysctl_conntrack; + return sysctl_ip_vs_conntrack; } extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, @@ -1221,7 +1062,7 @@ extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp); #else -static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) +static inline int ip_vs_conntrack_enabled(void) { return 0; } diff --git a/trunk/include/net/mac80211.h b/trunk/include/net/mac80211.h index d6b0045788ce..62c0ce2d1dc8 100644 --- a/trunk/include/net/mac80211.h +++ b/trunk/include/net/mac80211.h @@ -1147,17 +1147,6 @@ enum ieee80211_hw_flags { * @napi_weight: weight used for NAPI polling. You must specify an * appropriate value here if a napi_poll operation is provided * by your driver. - - * @max_rx_aggregation_subframes: maximum buffer size (number of - * sub-frames) to be used for A-MPDU block ack receiver - * aggregation. - * This is only relevant if the device has restrictions on the - * number of subframes, if it relies on mac80211 to do reordering - * it shouldn't be set. 
- * - * @max_tx_aggregation_subframes: maximum number of subframes in an - * aggregate an HT driver will transmit, used by the peer as a - * hint to size its reorder buffer. */ struct ieee80211_hw { struct ieee80211_conf conf; @@ -1176,8 +1165,6 @@ struct ieee80211_hw { u8 max_rates; u8 max_report_rates; u8 max_rate_tries; - u8 max_rx_aggregation_subframes; - u8 max_tx_aggregation_subframes; }; /** @@ -1736,10 +1723,6 @@ enum ieee80211_ampdu_mlme_action { * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) * is the first frame we expect to perform the action on. Notice * that TX/RX_STOP can pass NULL for this parameter. - * The @buf_size parameter is only valid when the action is set to - * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder - * buffer size (number of subframes) for this session -- aggregates - * containing more subframes than this may not be transmitted to the peer. * Returns a negative error code on failure. * The callback can sleep. * @@ -1842,8 +1825,7 @@ struct ieee80211_ops { int (*ampdu_action)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size); + struct ieee80211_sta *sta, u16 tid, u16 *ssn); int (*get_survey)(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void (*rfkill_poll)(struct ieee80211_hw *hw); diff --git a/trunk/include/net/net_namespace.h b/trunk/include/net/net_namespace.h index b3b4a34cb2cc..1bf812b21fb7 100644 --- a/trunk/include/net/net_namespace.h +++ b/trunk/include/net/net_namespace.h @@ -20,7 +20,6 @@ #include #endif #include -#include struct proc_dir_entry; struct net_device; @@ -95,7 +94,6 @@ struct net { #ifdef CONFIG_XFRM struct netns_xfrm xfrm; #endif - struct netns_ipvs *ipvs; }; diff --git a/trunk/include/net/netfilter/nf_conntrack.h b/trunk/include/net/netfilter/nf_conntrack.h index d0d13378991e..d85cff10e169 100644 --- a/trunk/include/net/netfilter/nf_conntrack.h +++ b/trunk/include/net/netfilter/nf_conntrack.h @@ -50,24 +50,11 @@ union nf_conntrack_expect_proto { /* per conntrack: application helper private data */ union nf_conntrack_help { /* insert conntrack helper private data (master) here */ -#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE) struct nf_ct_ftp_master ct_ftp_info; -#endif -#if defined(CONFIG_NF_CONNTRACK_PPTP) || \ - defined(CONFIG_NF_CONNTRACK_PPTP_MODULE) struct nf_ct_pptp_master ct_pptp_info; -#endif -#if defined(CONFIG_NF_CONNTRACK_H323) || \ - defined(CONFIG_NF_CONNTRACK_H323_MODULE) struct nf_ct_h323_master ct_h323_info; -#endif -#if defined(CONFIG_NF_CONNTRACK_SANE) || \ - defined(CONFIG_NF_CONNTRACK_SANE_MODULE) struct nf_ct_sane_master ct_sane_info; -#endif -#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE) struct nf_ct_sip_master ct_sip_info; -#endif }; #include @@ -129,14 +116,14 @@ struct nf_conn { u_int32_t secmark; #endif + /* Storage reserved for other modules: */ + union nf_conntrack_proto proto; + /* Extensions */ struct nf_ct_ext *ext; #ifdef CONFIG_NET_NS struct net *ct_net; #endif - - /* Storage reserved for other modules, must be the last member */ - union nf_conntrack_proto proto; }; static inline struct nf_conn * @@ -202,9 +189,9 @@ extern void nf_ct_l3proto_module_put(unsigned short l3proto); * Allocate a hashtable of hlist_head (if nulls == 0), * or hlist_nulls_head (if nulls == 1) */ -extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls); +extern void 
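The mac80211 hunk above removes the buf_size argument again, so a driver's ampdu_action callback matches the older five-parameter form shown in the + lines. A minimal sketch of such a callback under that signature; my_ampdu_action and the hardware handling are hypothetical, only the enum values and the ieee80211_*_tx_ba_cb_irqsafe() helpers are assumed from mac80211 of this era:

	#include <net/mac80211.h>

	static int my_ampdu_action(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum ieee80211_ampdu_mlme_action action,
				   struct ieee80211_sta *sta, u16 tid, u16 *ssn)
	{
		switch (action) {
		case IEEE80211_AMPDU_RX_START:
		case IEEE80211_AMPDU_RX_STOP:
			/* Program or tear down the RX reorder state for this TID. */
			return 0;
		case IEEE80211_AMPDU_TX_START:
			/* Tell mac80211 the addBA handshake may proceed. */
			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
			return 0;
		case IEEE80211_AMPDU_TX_STOP:
			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
			return 0;
		case IEEE80211_AMPDU_TX_OPERATIONAL:
			/* Session is up; without buf_size the driver applies its own limit. */
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}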
*nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls); -extern void nf_ct_free_hashtable(void *hash, unsigned int size); +extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size); extern struct nf_conntrack_tuple_hash * __nf_conntrack_find(struct net *net, u16 zone, diff --git a/trunk/include/net/netfilter/nf_conntrack_ecache.h b/trunk/include/net/netfilter/nf_conntrack_ecache.h index 8fdb04b8cce0..96ba5f7dcab6 100644 --- a/trunk/include/net/netfilter/nf_conntrack_ecache.h +++ b/trunk/include/net/netfilter/nf_conntrack_ecache.h @@ -23,17 +23,12 @@ struct nf_conntrack_ecache { static inline struct nf_conntrack_ecache * nf_ct_ecache_find(const struct nf_conn *ct) { -#ifdef CONFIG_NF_CONNTRACK_EVENTS return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE); -#else - return NULL; -#endif } static inline struct nf_conntrack_ecache * nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp) { -#ifdef CONFIG_NF_CONNTRACK_EVENTS struct net *net = nf_ct_net(ct); struct nf_conntrack_ecache *e; @@ -50,9 +45,6 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp) e->expmask = expmask; } return e; -#else - return NULL; -#endif }; #ifdef CONFIG_NF_CONNTRACK_EVENTS @@ -67,7 +59,7 @@ struct nf_ct_event_notifier { int (*fcn)(unsigned int events, struct nf_ct_event *item); }; -extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; +extern struct nf_ct_event_notifier *nf_conntrack_event_cb; extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb); extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb); @@ -167,7 +159,7 @@ struct nf_exp_event_notifier { int (*fcn)(unsigned int events, struct nf_exp_event *item); }; -extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb; +extern struct nf_exp_event_notifier *nf_expect_event_cb; extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb); extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb); diff --git a/trunk/include/net/netfilter/nf_conntrack_extend.h b/trunk/include/net/netfilter/nf_conntrack_extend.h index 2dcf31703acb..0772d296dfdb 100644 --- a/trunk/include/net/netfilter/nf_conntrack_extend.h +++ b/trunk/include/net/netfilter/nf_conntrack_extend.h @@ -7,19 +7,10 @@ enum nf_ct_ext_id { NF_CT_EXT_HELPER, -#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE) NF_CT_EXT_NAT, -#endif NF_CT_EXT_ACCT, -#ifdef CONFIG_NF_CONNTRACK_EVENTS NF_CT_EXT_ECACHE, -#endif -#ifdef CONFIG_NF_CONNTRACK_ZONES NF_CT_EXT_ZONE, -#endif -#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP - NF_CT_EXT_TSTAMP, -#endif NF_CT_EXT_NUM, }; @@ -28,7 +19,6 @@ enum nf_ct_ext_id { #define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache #define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone -#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp /* Extensions: optional stuff which isn't permanently in struct. 
*/ struct nf_ct_ext { diff --git a/trunk/include/net/netfilter/nf_conntrack_helper.h b/trunk/include/net/netfilter/nf_conntrack_helper.h index f1c1311adc2c..32c305dbdab6 100644 --- a/trunk/include/net/netfilter/nf_conntrack_helper.h +++ b/trunk/include/net/netfilter/nf_conntrack_helper.h @@ -63,10 +63,4 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct) extern int nf_conntrack_helper_init(void); extern void nf_conntrack_helper_fini(void); -extern int nf_conntrack_broadcast_help(struct sk_buff *skb, - unsigned int protoff, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int timeout); - #endif /*_NF_CONNTRACK_HELPER_H*/ diff --git a/trunk/include/net/netfilter/nf_conntrack_l3proto.h b/trunk/include/net/netfilter/nf_conntrack_l3proto.h index e8010f445ae1..a7547611e8f1 100644 --- a/trunk/include/net/netfilter/nf_conntrack_l3proto.h +++ b/trunk/include/net/netfilter/nf_conntrack_l3proto.h @@ -73,7 +73,7 @@ struct nf_conntrack_l3proto { struct module *me; }; -extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX]; +extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX]; /* Protocol registration. */ extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto); diff --git a/trunk/include/net/netfilter/nf_conntrack_timestamp.h b/trunk/include/net/netfilter/nf_conntrack_timestamp.h deleted file mode 100644 index fc9c82b1f06b..000000000000 --- a/trunk/include/net/netfilter/nf_conntrack_timestamp.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef _NF_CONNTRACK_TSTAMP_H -#define _NF_CONNTRACK_TSTAMP_H - -#include -#include -#include -#include -#include - -struct nf_conn_tstamp { - u_int64_t start; - u_int64_t stop; -}; - -static inline -struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct) -{ -#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP - return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP); -#else - return NULL; -#endif -} - -static inline -struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp) -{ -#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP - struct net *net = nf_ct_net(ct); - - if (!net->ct.sysctl_tstamp) - return NULL; - - return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp); -#else - return NULL; -#endif -}; - -static inline bool nf_ct_tstamp_enabled(struct net *net) -{ - return net->ct.sysctl_tstamp != 0; -} - -static inline void nf_ct_set_tstamp(struct net *net, bool enable) -{ - net->ct.sysctl_tstamp = enable; -} - -#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP -extern int nf_conntrack_tstamp_init(struct net *net); -extern void nf_conntrack_tstamp_fini(struct net *net); -#else -static inline int nf_conntrack_tstamp_init(struct net *net) -{ - return 0; -} - -static inline void nf_conntrack_tstamp_fini(struct net *net) -{ - return; -} -#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */ - -#endif /* _NF_CONNTRACK_TSTAMP_H */ diff --git a/trunk/include/net/netfilter/nf_nat.h b/trunk/include/net/netfilter/nf_nat.h index aff80b190c12..f5f09f032a90 100644 --- a/trunk/include/net/netfilter/nf_nat.h +++ b/trunk/include/net/netfilter/nf_nat.h @@ -56,9 +56,7 @@ struct nf_nat_multi_range_compat { /* per conntrack: nat application helper private data */ union nf_conntrack_nat_help { /* insert nat helper private data here */ -#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE) struct nf_nat_pptp nat_pptp_info; -#endif }; struct nf_conn; @@ -86,11 +84,7 @@ extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct) { -#if defined(CONFIG_NF_NAT) || 
defined(CONFIG_NF_NAT_MODULE) return nf_ct_ext_find(ct, NF_CT_EXT_NAT); -#else - return NULL; -#endif } #else /* !__KERNEL__: iptables wants this to compile. */ diff --git a/trunk/include/net/netfilter/nf_nat_core.h b/trunk/include/net/netfilter/nf_nat_core.h index 3dc7b98effeb..33602ab66190 100644 --- a/trunk/include/net/netfilter/nf_nat_core.h +++ b/trunk/include/net/netfilter/nf_nat_core.h @@ -21,9 +21,9 @@ static inline int nf_nat_initialized(struct nf_conn *ct, enum nf_nat_manip_type manip) { if (manip == IP_NAT_MANIP_SRC) - return ct->status & IPS_SRC_NAT_DONE; + return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); else - return ct->status & IPS_DST_NAT_DONE; + return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status); } struct nlattr; diff --git a/trunk/include/net/netns/conntrack.h b/trunk/include/net/netns/conntrack.h index 341eb089349e..d4958d4c6574 100644 --- a/trunk/include/net/netns/conntrack.h +++ b/trunk/include/net/netns/conntrack.h @@ -21,15 +21,15 @@ struct netns_ct { int sysctl_events; unsigned int sysctl_events_retry_timeout; int sysctl_acct; - int sysctl_tstamp; int sysctl_checksum; unsigned int sysctl_log_invalid; /* Log invalid packets */ #ifdef CONFIG_SYSCTL struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; - struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; #endif + int hash_vmalloc; + int expect_vmalloc; char *slabname; }; #endif diff --git a/trunk/include/net/netns/ip_vs.h b/trunk/include/net/netns/ip_vs.h deleted file mode 100644 index 259ebac904bf..000000000000 --- a/trunk/include/net/netns/ip_vs.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * IP Virtual Server - * Data structure for network namspace - * - */ - -#ifndef IP_VS_H_ -#define IP_VS_H_ - -#include -#include -#include -#include -#include -#include - -struct ip_vs_stats; -struct ip_vs_sync_buff; -struct ctl_table_header; - -struct netns_ipvs { - int gen; /* Generation */ - /* - * Hash table: for real service lookups - */ - #define IP_VS_RTAB_BITS 4 - #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) - #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) - - struct list_head rs_table[IP_VS_RTAB_SIZE]; - /* ip_vs_app */ - struct list_head app_list; - struct mutex app_mutex; - struct lock_class_key app_key; /* mutex debuging */ - - /* ip_vs_proto */ - #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ - struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE]; - /* ip_vs_proto_tcp */ -#ifdef CONFIG_IP_VS_PROTO_TCP - #define TCP_APP_TAB_BITS 4 - #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS) - #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1) - struct list_head tcp_apps[TCP_APP_TAB_SIZE]; - spinlock_t tcp_app_lock; -#endif - /* ip_vs_proto_udp */ -#ifdef CONFIG_IP_VS_PROTO_UDP - #define UDP_APP_TAB_BITS 4 - #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS) - #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1) - struct list_head udp_apps[UDP_APP_TAB_SIZE]; - spinlock_t udp_app_lock; -#endif - /* ip_vs_proto_sctp */ -#ifdef CONFIG_IP_VS_PROTO_SCTP - #define SCTP_APP_TAB_BITS 4 - #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS) - #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1) - /* Hash table for SCTP application incarnations */ - struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; - spinlock_t sctp_app_lock; -#endif - /* ip_vs_conn */ - atomic_t conn_count; /* connection counter */ - - /* ip_vs_ctl */ - struct ip_vs_stats *tot_stats; /* Statistics & est. 
*/ - struct ip_vs_cpu_stats __percpu *cpustats; /* Stats per cpu */ - seqcount_t *ustats_seq; /* u64 read retry */ - - int num_services; /* no of virtual services */ - /* 1/rate drop and drop-entry variables */ - struct delayed_work defense_work; /* Work handler */ - int drop_rate; - int drop_counter; - atomic_t dropentry; - /* locks in ctl.c */ - spinlock_t dropentry_lock; /* drop entry handling */ - spinlock_t droppacket_lock; /* drop packet handling */ - spinlock_t securetcp_lock; /* state and timeout tables */ - rwlock_t rs_lock; /* real services table */ - /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */ - struct lock_class_key ctl_key; /* ctl_mutex debuging */ - /* Trash for destinations */ - struct list_head dest_trash; - /* Service counters */ - atomic_t ftpsvc_counter; - atomic_t nullsvc_counter; - - /* sys-ctl struct */ - struct ctl_table_header *sysctl_hdr; - struct ctl_table *sysctl_tbl; - /* sysctl variables */ - int sysctl_amemthresh; - int sysctl_am_droprate; - int sysctl_drop_entry; - int sysctl_drop_packet; - int sysctl_secure_tcp; -#ifdef CONFIG_IP_VS_NFCT - int sysctl_conntrack; -#endif - int sysctl_snat_reroute; - int sysctl_sync_ver; - int sysctl_cache_bypass; - int sysctl_expire_nodest_conn; - int sysctl_expire_quiescent_template; - int sysctl_sync_threshold[2]; - int sysctl_nat_icmp_send; - - /* ip_vs_lblc */ - int sysctl_lblc_expiration; - struct ctl_table_header *lblc_ctl_header; - struct ctl_table *lblc_ctl_table; - /* ip_vs_lblcr */ - int sysctl_lblcr_expiration; - struct ctl_table_header *lblcr_ctl_header; - struct ctl_table *lblcr_ctl_table; - /* ip_vs_est */ - struct list_head est_list; /* estimator list */ - spinlock_t est_lock; - struct timer_list est_timer; /* Estimation timer */ - /* ip_vs_sync */ - struct list_head sync_queue; - spinlock_t sync_lock; - struct ip_vs_sync_buff *sync_buff; - spinlock_t sync_buff_lock; - struct sockaddr_in sync_mcast_addr; - struct task_struct *master_thread; - struct task_struct *backup_thread; - int send_mesg_maxlen; - int recv_mesg_maxlen; - volatile int sync_state; - volatile int master_syncid; - volatile int backup_syncid; - /* multicast interface name */ - char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; - char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; - /* net name space ptr */ - struct net *net; /* Needed by timer routines */ -}; - -#endif /* IP_VS_H_ */ diff --git a/trunk/include/net/netns/ipv4.h b/trunk/include/net/netns/ipv4.h index e2e2ef57eca2..d68c3f121774 100644 --- a/trunk/include/net/netns/ipv4.h +++ b/trunk/include/net/netns/ipv4.h @@ -43,6 +43,7 @@ struct netns_ipv4 { struct xt_table *nat_table; struct hlist_head *nat_bysource; unsigned int nat_htable_size; + int nat_vmalloced; #endif int sysctl_icmp_echo_ignore_all; diff --git a/trunk/include/net/protocol.h b/trunk/include/net/protocol.h index 6f7eb800974a..dc07495bce4c 100644 --- a/trunk/include/net/protocol.h +++ b/trunk/include/net/protocol.h @@ -38,7 +38,7 @@ struct net_protocol { void (*err_handler)(struct sk_buff *skb, u32 info); int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - u32 features); + int features); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); @@ -57,7 +57,7 @@ struct inet6_protocol { int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - u32 features); + int features); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int 
(*gro_complete)(struct sk_buff *skb); diff --git a/trunk/include/net/route.h b/trunk/include/net/route.h index e5864658dc76..93e10c453f6b 100644 --- a/trunk/include/net/route.h +++ b/trunk/include/net/route.h @@ -49,7 +49,6 @@ struct fib_nh; struct inet_peer; -struct fib_info; struct rtable { struct dst_entry dst; @@ -70,7 +69,6 @@ struct rtable { /* Miscellaneous cached information */ __be32 rt_spec_dst; /* RFC1122 specific destination */ struct inet_peer *peer; /* long-living peer info */ - struct fib_info *fi; /* for client ref to shared metrics */ }; static inline bool rt_is_input_route(struct rtable *rt) @@ -182,8 +180,6 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst, if (inet_sk(sk)->transparent) fl.flags |= FLOWI_FLAG_ANYSRC; - if (protocol == IPPROTO_TCP) - fl.flags |= FLOWI_FLAG_PRECOW_METRICS; if (!dst || !src) { err = __ip_route_output_key(net, rp, &fl); @@ -211,8 +207,6 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol, fl.proto = protocol; if (inet_sk(sk)->transparent) fl.flags |= FLOWI_FLAG_ANYSRC; - if (protocol == IPPROTO_TCP) - fl.flags |= FLOWI_FLAG_PRECOW_METRICS; ip_rt_put(*rp); *rp = NULL; security_sk_classify_flow(sk, &fl); diff --git a/trunk/include/net/sch_generic.h b/trunk/include/net/sch_generic.h index 16626a04cb03..e9eee99d8b1f 100644 --- a/trunk/include/net/sch_generic.h +++ b/trunk/include/net/sch_generic.h @@ -31,12 +31,10 @@ enum qdisc_state_t { * following bits are only changed while qdisc lock is held */ enum qdisc___state_t { - __QDISC___STATE_RUNNING = 1, - __QDISC___STATE_THROTTLED = 2, + __QDISC___STATE_RUNNING, }; struct qdisc_size_table { - struct rcu_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; @@ -48,13 +46,14 @@ struct Qdisc { struct sk_buff * (*dequeue)(struct Qdisc *dev); unsigned flags; #define TCQ_F_BUILTIN 1 -#define TCQ_F_INGRESS 2 -#define TCQ_F_CAN_BYPASS 4 -#define TCQ_F_MQROOT 8 +#define TCQ_F_THROTTLED 2 +#define TCQ_F_INGRESS 4 +#define TCQ_F_CAN_BYPASS 8 +#define TCQ_F_MQROOT 16 #define TCQ_F_WARN_NONWC (1 << 16) int padded; struct Qdisc_ops *ops; - struct qdisc_size_table __rcu *stab; + struct qdisc_size_table *stab; struct list_head list; u32 handle; u32 parent; @@ -79,43 +78,25 @@ struct Qdisc { unsigned long state; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; - unsigned int __state; + unsigned long __state; struct gnet_stats_queue qstats; struct rcu_head rcu_head; spinlock_t busylock; }; -static inline bool qdisc_is_running(const struct Qdisc *qdisc) +static inline bool qdisc_is_running(struct Qdisc *qdisc) { - return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false; + return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state); } static inline bool qdisc_run_begin(struct Qdisc *qdisc) { - if (qdisc_is_running(qdisc)) - return false; - qdisc->__state |= __QDISC___STATE_RUNNING; - return true; + return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state); } static inline void qdisc_run_end(struct Qdisc *qdisc) { - qdisc->__state &= ~__QDISC___STATE_RUNNING; -} - -static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) -{ - return (qdisc->__state & __QDISC___STATE_THROTTLED) ? 
true : false; -} - -static inline void qdisc_throttled(struct Qdisc *qdisc) -{ - qdisc->__state |= __QDISC___STATE_THROTTLED; -} - -static inline void qdisc_unthrottled(struct Qdisc *qdisc) -{ - qdisc->__state &= ~__QDISC___STATE_THROTTLED; + __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state); } struct Qdisc_class_ops { @@ -350,8 +331,8 @@ extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, struct Qdisc_ops *ops); extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, struct Qdisc_ops *ops, u32 parentid); -extern void __qdisc_calculate_pkt_len(struct sk_buff *skb, - const struct qdisc_size_table *stab); +extern void qdisc_calculate_pkt_len(struct sk_buff *skb, + struct qdisc_size_table *stab); extern void tcf_destroy(struct tcf_proto *tp); extern void tcf_destroy_chain(struct tcf_proto **fl); @@ -430,20 +411,12 @@ enum net_xmit_qdisc_t { #define net_xmit_drop_count(e) (1) #endif -static inline void qdisc_calculate_pkt_len(struct sk_buff *skb, - const struct Qdisc *sch) +static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) { #ifdef CONFIG_NET_SCHED - struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab); - - if (stab) - __qdisc_calculate_pkt_len(skb, stab); + if (sch->stab) + qdisc_calculate_pkt_len(skb, sch->stab); #endif -} - -static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) -{ - qdisc_calculate_pkt_len(skb, sch); return sch->enqueue(skb, sch); } @@ -472,6 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, { __skb_queue_tail(list, skb); sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_bstats_update(sch, skb); return NET_XMIT_SUCCESS; } @@ -486,10 +460,8 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, { struct sk_buff *skb = __skb_dequeue(list); - if (likely(skb != NULL)) { + if (likely(skb != NULL)) sch->qstats.backlog -= qdisc_pkt_len(skb); - qdisc_bstats_update(sch, skb); - } return skb; } @@ -502,11 +474,10 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, struct sk_buff_head *list) { - struct sk_buff *skb = __skb_dequeue(list); + struct sk_buff *skb = __qdisc_dequeue_head(sch, list); if (likely(skb != NULL)) { unsigned int len = qdisc_pkt_len(skb); - sch->qstats.backlog -= len; kfree_skb(skb); return len; } diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index e3893a2b5d25..d884d268c704 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -753,8 +753,6 @@ struct proto { int level, int optname, char __user *optval, int __user *option); - int (*compat_ioctl)(struct sock *sk, - unsigned int cmd, unsigned long arg); #endif int (*sendmsg)(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); @@ -1191,7 +1189,7 @@ extern void sk_filter_release_rcu(struct rcu_head *rcu); static inline void sk_filter_release(struct sk_filter *fp) { if (atomic_dec_and_test(&fp->refcnt)) - call_rcu(&fp->rcu, sk_filter_release_rcu); + call_rcu_bh(&fp->rcu, sk_filter_release_rcu); } static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) diff --git a/trunk/include/net/tcp.h b/trunk/include/net/tcp.h index 917911165e3b..38509f047382 100644 --- a/trunk/include/net/tcp.h +++ b/trunk/include/net/tcp.h @@ -1404,7 +1404,7 @@ extern struct request_sock_ops tcp6_request_sock_ops; extern void tcp_v4_destroy_sock(struct sock *sk); extern int tcp_v4_gso_send_check(struct sk_buff *skb); -extern struct sk_buff 
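Both variants of qdisc_run_begin()/qdisc_run_end() in the sch_generic.h hunk above implement the same owner protocol: exactly one CPU runs a given qdisc at a time and everyone else backs off, trusting the owner to drain the queue. The caller-side idiom is unchanged by the conversion; a minimal sketch of it, essentially what the qdisc_run() wrapper in net/pkt_sched.h does (my_qdisc_run is an illustrative name):

	#include <net/sch_generic.h>
	#include <net/pkt_sched.h>

	static void my_qdisc_run(struct Qdisc *q)
	{
		/*
		 * Only the CPU that wins qdisc_run_begin() dequeues packets;
		 * a concurrent caller simply returns, knowing the current
		 * owner will keep transmitting until the queue is empty.
		 */
		if (qdisc_run_begin(q)) {
			__qdisc_run(q);
			/* __qdisc_run() calls qdisc_run_end() when it is done. */
		}
	}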
*tcp_tso_segment(struct sk_buff *skb, u32 features); +extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); extern struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head, diff --git a/trunk/include/net/udp.h b/trunk/include/net/udp.h index e82f3a8c0f8f..bb967dd59bf7 100644 --- a/trunk/include/net/udp.h +++ b/trunk/include/net/udp.h @@ -245,5 +245,5 @@ extern void udp4_proc_exit(void); extern void udp_init(void); extern int udp4_ufo_send_check(struct sk_buff *skb); -extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features); +extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features); #endif /* _UDP_H */ diff --git a/trunk/kernel/audit.c b/trunk/kernel/audit.c index 162e88e33bc9..e4956244ae50 100644 --- a/trunk/kernel/audit.c +++ b/trunk/kernel/audit.c @@ -74,8 +74,6 @@ static int audit_initialized; int audit_enabled; int audit_ever_enabled; -EXPORT_SYMBOL_GPL(audit_enabled); - /* Default state when kernel boots without any parameters. */ static int audit_default; diff --git a/trunk/kernel/irq/manage.c b/trunk/kernel/irq/manage.c index 0587c5ceaed8..0caa59f747dd 100644 --- a/trunk/kernel/irq/manage.c +++ b/trunk/kernel/irq/manage.c @@ -134,10 +134,6 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) irq_set_thread_affinity(desc); } #endif - if (desc->affinity_notify) { - kref_get(&desc->affinity_notify->kref); - schedule_work(&desc->affinity_notify->work); - } desc->status |= IRQ_AFFINITY_SET; raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; @@ -159,79 +155,6 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) } EXPORT_SYMBOL_GPL(irq_set_affinity_hint); -static void irq_affinity_notify(struct work_struct *work) -{ - struct irq_affinity_notify *notify = - container_of(work, struct irq_affinity_notify, work); - struct irq_desc *desc = irq_to_desc(notify->irq); - cpumask_var_t cpumask; - unsigned long flags; - - if (!desc) - goto out; - - if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) - goto out; - - raw_spin_lock_irqsave(&desc->lock, flags); -#ifdef CONFIG_GENERIC_PENDING_IRQ - if (desc->status & IRQ_MOVE_PENDING) - cpumask_copy(cpumask, desc->pending_mask); - else -#endif - cpumask_copy(cpumask, desc->affinity); - raw_spin_unlock_irqrestore(&desc->lock, flags); - - notify->notify(notify, cpumask); - - free_cpumask_var(cpumask); -out: - kref_put(¬ify->kref, notify->release); -} - -/** - * irq_set_affinity_notifier - control notification of IRQ affinity changes - * @irq: Interrupt for which to enable/disable notification - * @notify: Context for notification, or %NULL to disable - * notification. Function pointers must be initialised; - * the other fields will be initialised by this function. - * - * Must be called in process context. Notification may only be enabled - * after the IRQ is allocated and must be disabled before the IRQ is - * freed using free_irq(). 
- */ -int -irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irq_affinity_notify *old_notify; - unsigned long flags; - - /* The release function is promised process context */ - might_sleep(); - - if (!desc) - return -EINVAL; - - /* Complete initialisation of *notify */ - if (notify) { - notify->irq = irq; - kref_init(¬ify->kref); - INIT_WORK(¬ify->work, irq_affinity_notify); - } - - raw_spin_lock_irqsave(&desc->lock, flags); - old_notify = desc->affinity_notify; - desc->affinity_notify = notify; - raw_spin_unlock_irqrestore(&desc->lock, flags); - - if (old_notify) - kref_put(&old_notify->kref, old_notify->release); - - return 0; -} -EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); - #ifndef CONFIG_AUTO_IRQ_AFFINITY /* * Generic version of the affinity autoselector. @@ -1081,11 +1004,6 @@ void free_irq(unsigned int irq, void *dev_id) if (!desc) return; -#ifdef CONFIG_SMP - if (WARN_ON(desc->affinity_notify)) - desc->affinity_notify = NULL; -#endif - chip_bus_lock(desc); kfree(__free_irq(irq, dev_id)); chip_bus_sync_unlock(desc); diff --git a/trunk/kernel/params.c b/trunk/kernel/params.c index 0da1411222b9..08107d181758 100644 --- a/trunk/kernel/params.c +++ b/trunk/kernel/params.c @@ -719,7 +719,9 @@ void destroy_params(const struct kernel_param *params, unsigned num) params[i].ops->free(params[i].arg); } -static struct module_kobject * __init locate_module_kobject(const char *name) +static void __init kernel_add_sysfs_param(const char *name, + struct kernel_param *kparam, + unsigned int name_skip) { struct module_kobject *mk; struct kobject *kobj; @@ -727,7 +729,10 @@ static struct module_kobject * __init locate_module_kobject(const char *name) kobj = kset_find_obj(module_kset, name); if (kobj) { + /* We already have one. Remove params so we can add more. */ mk = to_module_kobject(kobj); + /* We need to remove it before adding parameters. */ + sysfs_remove_group(&mk->kobj, &mk->mp->grp); } else { mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); BUG_ON(!mk); @@ -738,36 +743,15 @@ static struct module_kobject * __init locate_module_kobject(const char *name) "%s", name); if (err) { kobject_put(&mk->kobj); - printk(KERN_ERR - "Module '%s' failed add to sysfs, error number %d\n", - name, err); - printk(KERN_ERR - "The system will be unstable now.\n"); - return NULL; + printk(KERN_ERR "Module '%s' failed add to sysfs, " + "error number %d\n", name, err); + printk(KERN_ERR "The system will be unstable now.\n"); + return; } - - /* So that we hold reference in both cases. */ + /* So that exit path is even. */ kobject_get(&mk->kobj); } - return mk; -} - -static void __init kernel_add_sysfs_param(const char *name, - struct kernel_param *kparam, - unsigned int name_skip) -{ - struct module_kobject *mk; - int err; - - mk = locate_module_kobject(name); - if (!mk) - return; - - /* We need to remove old parameters before adding more. */ - if (mk->mp) - sysfs_remove_group(&mk->kobj, &mk->mp->grp); - /* These should not fail at boot. 
*/ err = add_sysfs_param(mk, kparam, kparam->name + name_skip); BUG_ON(err); @@ -812,32 +796,6 @@ static void __init param_sysfs_builtin(void) } } -ssize_t __modver_version_show(struct module_attribute *mattr, - struct module *mod, char *buf) -{ - struct module_version_attribute *vattr = - container_of(mattr, struct module_version_attribute, mattr); - - return sprintf(buf, "%s\n", vattr->version); -} - -extern struct module_version_attribute __start___modver[], __stop___modver[]; - -static void __init version_sysfs_builtin(void) -{ - const struct module_version_attribute *vattr; - struct module_kobject *mk; - int err; - - for (vattr = __start___modver; vattr < __stop___modver; vattr++) { - mk = locate_module_kobject(vattr->module_name); - if (mk) { - err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); - kobject_uevent(&mk->kobj, KOBJ_ADD); - kobject_put(&mk->kobj); - } - } -} /* module-related sysfs stuff */ @@ -917,7 +875,6 @@ static int __init param_sysfs_init(void) } module_sysfs_initialized = 1; - version_sysfs_builtin(); param_sysfs_builtin(); return 0; diff --git a/trunk/kernel/perf_event.c b/trunk/kernel/perf_event.c index 126a302c481c..84522c796987 100644 --- a/trunk/kernel/perf_event.c +++ b/trunk/kernel/perf_event.c @@ -2201,6 +2201,13 @@ find_lively_task_by_vpid(pid_t vpid) if (!task) return ERR_PTR(-ESRCH); + /* + * Can't attach events to a dying task. + */ + err = -ESRCH; + if (task->flags & PF_EXITING) + goto errout; + /* Reuse ptrace permission checks for now. */ err = -EACCES; if (!ptrace_may_access(task, PTRACE_MODE_READ)) @@ -2261,27 +2268,14 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) get_ctx(ctx); - err = 0; - mutex_lock(&task->perf_event_mutex); - /* - * If it has already passed perf_event_exit_task(). - * we must see PF_EXITING, it takes this mutex too. - */ - if (task->flags & PF_EXITING) - err = -ESRCH; - else if (task->perf_event_ctxp[ctxn]) - err = -EAGAIN; - else - rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); - mutex_unlock(&task->perf_event_mutex); - - if (unlikely(err)) { + if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) { + /* + * We raced with some other task; use + * the context they set. + */ put_task_struct(task); kfree(ctx); - - if (err == -EAGAIN) - goto retry; - goto errout; + goto retry; } } @@ -5380,8 +5374,6 @@ static int pmu_dev_alloc(struct pmu *pmu) goto out; } -static struct lock_class_key cpuctx_mutex; - int perf_pmu_register(struct pmu *pmu, char *name, int type) { int cpu, ret; @@ -5430,7 +5422,6 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type) cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); __perf_event_init_context(&cpuctx->ctx); - lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); cpuctx->ctx.type = cpu_context; cpuctx->ctx.pmu = pmu; cpuctx->jiffies_interval = 1; @@ -6136,7 +6127,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) * scheduled, so we are now safe from rescheduling changing * our context. 
*/ - child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); + child_ctx = child->perf_event_ctxp[ctxn]; task_ctx_sched_out(child_ctx, EVENT_ALL); /* @@ -6449,6 +6440,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn) unsigned long flags; int ret = 0; + child->perf_event_ctxp[ctxn] = NULL; + + mutex_init(&child->perf_event_mutex); + INIT_LIST_HEAD(&child->perf_event_list); + if (likely(!parent->perf_event_ctxp[ctxn])) return 0; @@ -6537,10 +6533,6 @@ int perf_event_init_task(struct task_struct *child) { int ctxn, ret; - memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); - mutex_init(&child->perf_event_mutex); - INIT_LIST_HEAD(&child->perf_event_list); - for_each_task_context_nr(ctxn) { ret = perf_event_init_context(child, ctxn); if (ret) diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index 354769979c02..77e9166d7bbf 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -699,8 +699,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) cfs_rq->nr_running--; } -#ifdef CONFIG_FAIR_GROUP_SCHED -# ifdef CONFIG_SMP +#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq, int global_update) { @@ -763,51 +762,6 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) list_del_leaf_cfs_rq(cfs_rq); } -static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg, - long weight_delta) -{ - long load_weight, load, shares; - - load = cfs_rq->load.weight + weight_delta; - - load_weight = atomic_read(&tg->load_weight); - load_weight -= cfs_rq->load_contribution; - load_weight += load; - - shares = (tg->shares * load); - if (load_weight) - shares /= load_weight; - - if (shares < MIN_SHARES) - shares = MIN_SHARES; - if (shares > tg->shares) - shares = tg->shares; - - return shares; -} - -static void update_entity_shares_tick(struct cfs_rq *cfs_rq) -{ - if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) { - update_cfs_load(cfs_rq, 0); - update_cfs_shares(cfs_rq, 0); - } -} -# else /* CONFIG_SMP */ -static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) -{ -} - -static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg, - long weight_delta) -{ - return tg->shares; -} - -static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq) -{ -} -# endif /* CONFIG_SMP */ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) { @@ -828,7 +782,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta) { struct task_group *tg; struct sched_entity *se; - long shares; + long load_weight, load, shares; if (!cfs_rq) return; @@ -837,14 +791,32 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta) se = tg->se[cpu_of(rq_of(cfs_rq))]; if (!se) return; -#ifndef CONFIG_SMP - if (likely(se->load.weight == tg->shares)) - return; -#endif - shares = calc_cfs_shares(cfs_rq, tg, weight_delta); + + load = cfs_rq->load.weight + weight_delta; + + load_weight = atomic_read(&tg->load_weight); + load_weight -= cfs_rq->load_contribution; + load_weight += load; + + shares = (tg->shares * load); + if (load_weight) + shares /= load_weight; + + if (shares < MIN_SHARES) + shares = MIN_SHARES; + if (shares > tg->shares) + shares = tg->shares; reweight_entity(cfs_rq_of(se), se, shares); } + +static void update_entity_shares_tick(struct cfs_rq *cfs_rq) +{ + if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) 
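The find_get_context() hunk above trades a mutex-protected install for a bare cmpxchg(): allocate a context, try to publish it atomically, and if another task won the race discard ours and use theirs. The same install-or-reuse pattern in a self-contained userspace form, with C11 atomics standing in for the kernel's cmpxchg and the retry loop of the original elided (ctx_slot, make_ctx and get_ctx are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ctx { int id; };

	/* Plays the role of task->perf_event_ctxp[ctxn]. */
	static _Atomic(struct ctx *) ctx_slot;

	static struct ctx *make_ctx(int id)
	{
		struct ctx *c = malloc(sizeof(*c));
		c->id = id;
		return c;
	}

	/* Return the installed context, publishing ours only if the slot was empty. */
	static struct ctx *get_ctx(int id)
	{
		struct ctx *cur = atomic_load(&ctx_slot);
		struct ctx *mine, *expected = NULL;

		if (cur)
			return cur;		/* fast path: already installed */

		mine = make_ctx(id);
		if (atomic_compare_exchange_strong(&ctx_slot, &expected, mine))
			return mine;		/* we won the race */

		free(mine);			/* somebody beat us; use theirs */
		return expected;		/* CAS wrote the winner back here */
	}

	int main(void)
	{
		struct ctx *a = get_ctx(1);
		struct ctx *b = get_ctx(2);

		printf("same context: %s (id %d)\n", a == b ? "yes" : "no", a->id);
		return 0;
	}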
{ + update_cfs_load(cfs_rq, 0); + update_cfs_shares(cfs_rq, 0); + } +} #else /* CONFIG_FAIR_GROUP_SCHED */ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) { diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index c55ea2433471..3e216e01bbd1 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -642,7 +642,8 @@ static void tick_nohz_switch_to_nohz(void) } local_irq_enable(); - printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); + printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", + smp_processor_id()); } /* @@ -794,10 +795,8 @@ void tick_setup_sched_timer(void) } #ifdef CONFIG_NO_HZ - if (tick_nohz_enabled) { + if (tick_nohz_enabled) ts->nohz_mode = NOHZ_MODE_HIGHRES; - printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); - } #endif } #endif /* HIGH_RES_TIMERS */ diff --git a/trunk/lib/Kconfig b/trunk/lib/Kconfig index 8334342e0d05..0ee67e08ad3e 100644 --- a/trunk/lib/Kconfig +++ b/trunk/lib/Kconfig @@ -201,10 +201,6 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS depends on EXPERIMENTAL && BROKEN -config CPU_RMAP - bool - depends on SMP - # # Netlink attribute parsing support is select'ed if needed # diff --git a/trunk/lib/Makefile b/trunk/lib/Makefile index b73ba01a818a..cbb774f7d41d 100644 --- a/trunk/lib/Makefile +++ b/trunk/lib/Makefile @@ -110,8 +110,6 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o obj-$(CONFIG_AVERAGE) += average.o -obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o - hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/trunk/lib/cpu_rmap.c b/trunk/lib/cpu_rmap.c deleted file mode 100644 index 987acfafeb83..000000000000 --- a/trunk/lib/cpu_rmap.c +++ /dev/null @@ -1,269 +0,0 @@ -/* - * cpu_rmap.c: CPU affinity reverse-map support - * Copyright 2011 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. - */ - -#include -#ifdef CONFIG_GENERIC_HARDIRQS -#include -#endif -#include - -/* - * These functions maintain a mapping from CPUs to some ordered set of - * objects with CPU affinities. This can be seen as a reverse-map of - * CPU affinity. However, we do not assume that the object affinities - * cover all CPUs in the system. For those CPUs not directly covered - * by object affinities, we attempt to find a nearest object based on - * CPU topology. - */ - -/** - * alloc_cpu_rmap - allocate CPU affinity reverse-map - * @size: Number of objects to be mapped - * @flags: Allocation flags e.g. %GFP_KERNEL - */ -struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags) -{ - struct cpu_rmap *rmap; - unsigned int cpu; - size_t obj_offset; - - /* This is a silly number of objects, and we use u16 indices. */ - if (size > 0xffff) - return NULL; - - /* Offset of object pointer array from base structure */ - obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]), - sizeof(void *)); - - rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags); - if (!rmap) - return NULL; - - rmap->obj = (void **)((char *)rmap + obj_offset); - - /* Initially assign CPUs to objects on a rota, since we have - * no idea where the objects are. Use infinite distance, so - * any object with known distance is preferable. 
Include the - * CPUs that are not present/online, since we definitely want - * any newly-hotplugged CPUs to have some object assigned. - */ - for_each_possible_cpu(cpu) { - rmap->near[cpu].index = cpu % size; - rmap->near[cpu].dist = CPU_RMAP_DIST_INF; - } - - rmap->size = size; - return rmap; -} -EXPORT_SYMBOL(alloc_cpu_rmap); - -/* Reevaluate nearest object for given CPU, comparing with the given - * neighbours at the given distance. - */ -static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, - const struct cpumask *mask, u16 dist) -{ - int neigh; - - for_each_cpu(neigh, mask) { - if (rmap->near[cpu].dist > dist && - rmap->near[neigh].dist <= dist) { - rmap->near[cpu].index = rmap->near[neigh].index; - rmap->near[cpu].dist = dist; - return true; - } - } - return false; -} - -#ifdef DEBUG -static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) -{ - unsigned index; - unsigned int cpu; - - pr_info("cpu_rmap %p, %s:\n", rmap, prefix); - - for_each_possible_cpu(cpu) { - index = rmap->near[cpu].index; - pr_info("cpu %d -> obj %u (distance %u)\n", - cpu, index, rmap->near[cpu].dist); - } -} -#else -static inline void -debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) -{ -} -#endif - -/** - * cpu_rmap_add - add object to a rmap - * @rmap: CPU rmap allocated with alloc_cpu_rmap() - * @obj: Object to add to rmap - * - * Return index of object. - */ -int cpu_rmap_add(struct cpu_rmap *rmap, void *obj) -{ - u16 index; - - BUG_ON(rmap->used >= rmap->size); - index = rmap->used++; - rmap->obj[index] = obj; - return index; -} -EXPORT_SYMBOL(cpu_rmap_add); - -/** - * cpu_rmap_update - update CPU rmap following a change of object affinity - * @rmap: CPU rmap to update - * @index: Index of object whose affinity changed - * @affinity: New CPU affinity of object - */ -int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, - const struct cpumask *affinity) -{ - cpumask_var_t update_mask; - unsigned int cpu; - - if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL))) - return -ENOMEM; - - /* Invalidate distance for all CPUs for which this used to be - * the nearest object. Mark those CPUs for update. - */ - for_each_online_cpu(cpu) { - if (rmap->near[cpu].index == index) { - rmap->near[cpu].dist = CPU_RMAP_DIST_INF; - cpumask_set_cpu(cpu, update_mask); - } - } - - debug_print_rmap(rmap, "after invalidating old distances"); - - /* Set distance to 0 for all CPUs in the new affinity mask. - * Mark all CPUs within their NUMA nodes for update. - */ - for_each_cpu(cpu, affinity) { - rmap->near[cpu].index = index; - rmap->near[cpu].dist = 0; - cpumask_or(update_mask, update_mask, - cpumask_of_node(cpu_to_node(cpu))); - } - - debug_print_rmap(rmap, "after updating neighbours"); - - /* Update distances based on topology */ - for_each_cpu(cpu, update_mask) { - if (cpu_rmap_copy_neigh(rmap, cpu, - topology_thread_cpumask(cpu), 1)) - continue; - if (cpu_rmap_copy_neigh(rmap, cpu, - topology_core_cpumask(cpu), 2)) - continue; - if (cpu_rmap_copy_neigh(rmap, cpu, - cpumask_of_node(cpu_to_node(cpu)), 3)) - continue; - /* We could continue into NUMA node distances, but for now - * we give up. 
- */ - } - - debug_print_rmap(rmap, "after copying neighbours"); - - free_cpumask_var(update_mask); - return 0; -} -EXPORT_SYMBOL(cpu_rmap_update); - -#ifdef CONFIG_GENERIC_HARDIRQS - -/* Glue between IRQ affinity notifiers and CPU rmaps */ - -struct irq_glue { - struct irq_affinity_notify notify; - struct cpu_rmap *rmap; - u16 index; -}; - -/** - * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs - * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL - * - * Must be called in process context, before freeing the IRQs, and - * without holding any locks required by global workqueue items. - */ -void free_irq_cpu_rmap(struct cpu_rmap *rmap) -{ - struct irq_glue *glue; - u16 index; - - if (!rmap) - return; - - for (index = 0; index < rmap->used; index++) { - glue = rmap->obj[index]; - irq_set_affinity_notifier(glue->notify.irq, NULL); - } - irq_run_affinity_notifiers(); - - kfree(rmap); -} -EXPORT_SYMBOL(free_irq_cpu_rmap); - -static void -irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) -{ - struct irq_glue *glue = - container_of(notify, struct irq_glue, notify); - int rc; - - rc = cpu_rmap_update(glue->rmap, glue->index, mask); - if (rc) - pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc); -} - -static void irq_cpu_rmap_release(struct kref *ref) -{ - struct irq_glue *glue = - container_of(ref, struct irq_glue, notify.kref); - kfree(glue); -} - -/** - * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map - * @rmap: The reverse-map - * @irq: The IRQ number - * - * This adds an IRQ affinity notifier that will update the reverse-map - * automatically. - * - * Must be called in process context, after the IRQ is allocated but - * before it is bound with request_irq(). - */ -int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq) -{ - struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL); - int rc; - - if (!glue) - return -ENOMEM; - glue->notify.notify = irq_cpu_rmap_notify; - glue->notify.release = irq_cpu_rmap_release; - glue->rmap = rmap; - glue->index = cpu_rmap_add(rmap, glue); - rc = irq_set_affinity_notifier(irq, &glue->notify); - if (rc) - kfree(glue); - return rc; -} -EXPORT_SYMBOL(irq_cpu_rmap_add); - -#endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/trunk/lib/textsearch.c b/trunk/lib/textsearch.c index e0cc0146ae62..d608331b3e47 100644 --- a/trunk/lib/textsearch.c +++ b/trunk/lib/textsearch.c @@ -13,7 +13,7 @@ * * INTRODUCTION * - * The textsearch infrastructure provides text searching facilities for + * The textsearch infrastructure provides text searching facitilies for * both linear and non-linear data. Individual search algorithms are * implemented in modules and chosen by the user. * @@ -43,7 +43,7 @@ * to the algorithm to store persistent variables. * (4) Core eventually resets the search offset and forwards the find() * request to the algorithm. - * (5) Algorithm calls get_next_block() provided by the user continuously + * (5) Algorithm calls get_next_block() provided by the user continously * to fetch the data to be searched in block by block. * (6) Algorithm invokes finish() after the last call to get_next_block * to clean up any leftovers from get_next_block. (Optional) @@ -58,15 +58,15 @@ * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE * to perform case insensitive matching. But it might slow down * performance of algorithm, so you should use it at own your risk. 
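Before it is deleted above, lib/cpu_rmap.c lets a driver ask "which of my queues or IRQs is nearest to this CPU?". A minimal driver-side sketch of the interface the file exported, assuming the matching linux/cpu_rmap.h header that declares struct cpu_rmap; my_setup_rmap, my_nearest_queue, nvec and the irq_of() callback are hypothetical:

	#include <linux/cpu_rmap.h>
	#include <linux/interrupt.h>

	static struct cpu_rmap *my_rmap;

	static int my_setup_rmap(unsigned int nvec, int (*irq_of)(unsigned int))
	{
		unsigned int i;
		int rc;

		my_rmap = alloc_cpu_rmap(nvec, GFP_KERNEL);
		if (!my_rmap)
			return -ENOMEM;

		/*
		 * Each IRQ gets an affinity notifier; whenever its affinity
		 * changes, the reverse map re-learns which object is nearest
		 * to every CPU, falling back to topology for uncovered CPUs.
		 */
		for (i = 0; i < nvec; i++) {
			rc = irq_cpu_rmap_add(my_rmap, irq_of(i));
			if (rc) {
				free_irq_cpu_rmap(my_rmap);
				my_rmap = NULL;
				return rc;
			}
		}
		return 0;
	}

	/* Later, e.g. when steering a flow: pick the object nearest to this CPU. */
	static u16 my_nearest_queue(unsigned int cpu)
	{
		return my_rmap->near[cpu].index;
	}

Teardown mirrors setup: free_irq_cpu_rmap() unhooks the notifiers and frees the map, and per its comment must run in process context before the IRQs themselves are freed.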
- * The returned configuration may then be used for an arbitrary + * The returned configuration may then be used for an arbitary * amount of times and even in parallel as long as a separate struct * ts_state variable is provided to every instance. * * The actual search is performed by either calling textsearch_find_- * continuous() for linear data or by providing an own get_next_block() * implementation and calling textsearch_find(). Both functions return - * the position of the first occurrence of the pattern or UINT_MAX if - * no match was found. Subsequent occurrences can be found by calling + * the position of the first occurrence of the patern or UINT_MAX if + * no match was found. Subsequent occurences can be found by calling * textsearch_next() regardless of the linearity of the data. * * Once you're done using a configuration it must be given back via diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c index 7850412f52b7..6e64f7c6a2e9 100644 --- a/trunk/net/8021q/vlan.c +++ b/trunk/net/8021q/vlan.c @@ -327,7 +327,7 @@ static void vlan_sync_address(struct net_device *dev, static void vlan_transfer_features(struct net_device *dev, struct net_device *vlandev) { - u32 old_features = vlandev->features; + unsigned long old_features = vlandev->features; vlandev->features &= ~dev->vlan_features; vlandev->features |= dev->features & dev->vlan_features; diff --git a/trunk/net/9p/trans_rdma.c b/trunk/net/9p/trans_rdma.c index 29a54ccd213d..17c5ba7551a5 100644 --- a/trunk/net/9p/trans_rdma.c +++ b/trunk/net/9p/trans_rdma.c @@ -59,6 +59,7 @@ * safely advertise a maxsize * of 64k */ +#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT) /** * struct p9_trans_rdma - RDMA transport instance * diff --git a/trunk/net/Kconfig b/trunk/net/Kconfig index 79cabf1ee68b..72840626284b 100644 --- a/trunk/net/Kconfig +++ b/trunk/net/Kconfig @@ -221,12 +221,6 @@ config RPS depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS default y -config RFS_ACCEL - boolean - depends on RPS && GENERIC_HARDIRQS - select CPU_RMAP - default y - config XPS boolean depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS diff --git a/trunk/net/batman-adv/unicast.c b/trunk/net/batman-adv/unicast.c index ee41fef04b21..811f7fc7932d 100644 --- a/trunk/net/batman-adv/unicast.c +++ b/trunk/net/batman-adv/unicast.c @@ -224,7 +224,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, struct unicast_frag_packet *frag1, *frag2; int uc_hdr_len = sizeof(struct unicast_packet); int ucf_hdr_len = sizeof(struct unicast_frag_packet); - int data_len = skb->len; + int data_len = skb->len - uc_hdr_len; if (!bat_priv->primary_if) goto dropped; @@ -232,10 +232,11 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); if (!frag_skb) goto dropped; + skb_reserve(frag_skb, ucf_hdr_len); unicast_packet = (struct unicast_packet *) skb->data; memcpy(&tmp_uc, unicast_packet, uc_hdr_len); - skb_split(skb, frag_skb, data_len / 2); + skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len); if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || my_skb_head_push(frag_skb, ucf_hdr_len) < 0) diff --git a/trunk/net/batman-adv/vis.c b/trunk/net/batman-adv/vis.c index de1022cacaf7..cd4c4231fa48 100644 --- a/trunk/net/batman-adv/vis.c +++ b/trunk/net/batman-adv/vis.c @@ -64,7 +64,6 @@ static void free_info(struct kref *ref) spin_unlock_bh(&bat_priv->vis_list_lock); kfree_skb(info->skb_packet); - kfree(info); } /* Compare two vis packets, used by the hashing algorithm 
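The lib/textsearch.c comment reverted in the hunk above walks through the prepare/find/next/destroy cycle. A small self-contained sketch of that cycle on linear data follows; the algorithm name and pattern are picked purely for illustration.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/textsearch.h>

/* Illustrative only: search a linear buffer for "abc", case-insensitively. */
static int example_textsearch(const void *data, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	/* "kmp" needs CONFIG_TEXTSEARCH_KMP; TS_AUTOLOAD may load it on demand */
	conf = textsearch_prepare("kmp", "abc", 3, GFP_KERNEL,
				  TS_AUTOLOAD | TS_IGNORECASE);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* linear data, so the core provides get_next_block() itself */
	for (pos = textsearch_find_continuous(conf, &state, data, len);
	     pos != UINT_MAX;
	     pos = textsearch_next(conf, &state))
		pr_info("match at offset %u\n", pos);

	textsearch_destroy(conf);
	return 0;
}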
*/ @@ -269,10 +268,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) buff_pos += sprintf(buff + buff_pos, "%pM,", entry->addr); - for (j = 0; j < packet->entries; j++) + for (i = 0; i < packet->entries; i++) buff_pos += vis_data_read_entry( buff + buff_pos, - &entries[j], + &entries[i], entry->addr, entry->primary); @@ -445,7 +444,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, info); if (hash_added < 0) { /* did not work (for some reason) */ - kref_put(&info->refcount, free_info); + kref_put(&old_info->refcount, free_info); info = NULL; } @@ -816,7 +815,7 @@ static void send_vis_packets(struct work_struct *work) container_of(work, struct delayed_work, work); struct bat_priv *bat_priv = container_of(delayed_work, struct bat_priv, vis_work); - struct vis_info *info; + struct vis_info *info, *temp; spin_lock_bh(&bat_priv->vis_hash_lock); purge_vis_packets(bat_priv); @@ -826,9 +825,8 @@ static void send_vis_packets(struct work_struct *work) send_list_add(bat_priv, bat_priv->my_vis_info); } - while (!list_empty(&bat_priv->vis_send_list)) { - info = list_first_entry(&bat_priv->vis_send_list, - typeof(*info), send_list); + list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list, + send_list) { kref_get(&info->refcount); spin_unlock_bh(&bat_priv->vis_hash_lock); diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index 99cd8d9d891b..6b90a4191734 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -379,10 +379,14 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 hci_conn_hold(acl); if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { - acl->sec_level = BT_SECURITY_LOW; - acl->pending_sec_level = sec_level; + acl->sec_level = sec_level; acl->auth_type = auth_type; hci_acl_connect(acl); + } else { + if (acl->sec_level < sec_level) + acl->sec_level = sec_level; + if (acl->auth_type < auth_type) + acl->auth_type = auth_type; } if (type == ACL_LINK) @@ -438,17 +442,11 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) { BT_DBG("conn %p", conn); - if (conn->pending_sec_level > sec_level) - sec_level = conn->pending_sec_level; - if (sec_level > conn->sec_level) - conn->pending_sec_level = sec_level; + conn->sec_level = sec_level; else if (conn->link_mode & HCI_LM_AUTH) return 1; - /* Make sure we preserve an existing MITM requirement*/ - auth_type |= (conn->auth_type & 0x01); - conn->auth_type = auth_type; if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index 9c4541bc488a..8b602d881fd7 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -1011,10 +1011,6 @@ int hci_unregister_dev(struct hci_dev *hdev) destroy_workqueue(hdev->workqueue); - hci_dev_lock_bh(hdev); - hci_blacklist_clear(hdev); - hci_dev_unlock_bh(hdev); - __hci_dev_put(hdev); return 0; diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c index a290854fdaa6..38100170d380 100644 --- a/trunk/net/bluetooth/hci_event.c +++ b/trunk/net/bluetooth/hci_event.c @@ -692,13 +692,13 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev, if (conn->state != BT_CONFIG || !conn->out) return 0; - if (conn->pending_sec_level == BT_SECURITY_SDP) + if (conn->sec_level == BT_SECURITY_SDP) return 0; /* Only request authentication for SSP connections or non-SSP * devices with sec_level HIGH */ if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && - 
conn->pending_sec_level != BT_SECURITY_HIGH) + conn->sec_level != BT_SECURITY_HIGH) return 0; return 1; @@ -1095,10 +1095,9 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { - if (!ev->status) { + if (!ev->status) conn->link_mode |= HCI_LM_AUTH; - conn->sec_level = conn->pending_sec_level; - } else + else conn->sec_level = BT_SECURITY_LOW; clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); diff --git a/trunk/net/bluetooth/l2cap.c b/trunk/net/bluetooth/l2cap.c index 7550abb0c96a..c791fcda7b2d 100644 --- a/trunk/net/bluetooth/l2cap.c +++ b/trunk/net/bluetooth/l2cap.c @@ -305,44 +305,33 @@ static void l2cap_chan_del(struct sock *sk, int err) } } -static inline u8 l2cap_get_auth_type(struct sock *sk) +/* Service level security */ +static inline int l2cap_check_security(struct sock *sk) { - if (sk->sk_type == SOCK_RAW) { - switch (l2cap_pi(sk)->sec_level) { - case BT_SECURITY_HIGH: - return HCI_AT_DEDICATED_BONDING_MITM; - case BT_SECURITY_MEDIUM: - return HCI_AT_DEDICATED_BONDING; - default: - return HCI_AT_NO_BONDING; - } - } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { - if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) - l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + __u8 auth_type; + if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) - return HCI_AT_NO_BONDING_MITM; + auth_type = HCI_AT_NO_BONDING_MITM; else - return HCI_AT_NO_BONDING; + auth_type = HCI_AT_NO_BONDING; + + if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) + l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; } else { switch (l2cap_pi(sk)->sec_level) { case BT_SECURITY_HIGH: - return HCI_AT_GENERAL_BONDING_MITM; + auth_type = HCI_AT_GENERAL_BONDING_MITM; + break; case BT_SECURITY_MEDIUM: - return HCI_AT_GENERAL_BONDING; + auth_type = HCI_AT_GENERAL_BONDING; + break; default: - return HCI_AT_NO_BONDING; + auth_type = HCI_AT_NO_BONDING; + break; } } -} - -/* Service level security */ -static inline int l2cap_check_security(struct sock *sk) -{ - struct l2cap_conn *conn = l2cap_pi(sk)->conn; - __u8 auth_type; - - auth_type = l2cap_get_auth_type(sk); return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, auth_type); @@ -1079,7 +1068,39 @@ static int l2cap_do_connect(struct sock *sk) err = -ENOMEM; - auth_type = l2cap_get_auth_type(sk); + if (sk->sk_type == SOCK_RAW) { + switch (l2cap_pi(sk)->sec_level) { + case BT_SECURITY_HIGH: + auth_type = HCI_AT_DEDICATED_BONDING_MITM; + break; + case BT_SECURITY_MEDIUM: + auth_type = HCI_AT_DEDICATED_BONDING; + break; + default: + auth_type = HCI_AT_NO_BONDING; + break; + } + } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { + if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) + auth_type = HCI_AT_NO_BONDING_MITM; + else + auth_type = HCI_AT_NO_BONDING; + + if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) + l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; + } else { + switch (l2cap_pi(sk)->sec_level) { + case BT_SECURITY_HIGH: + auth_type = HCI_AT_GENERAL_BONDING_MITM; + break; + case BT_SECURITY_MEDIUM: + auth_type = HCI_AT_GENERAL_BONDING; + break; + default: + auth_type = HCI_AT_NO_BONDING; + break; + } + } hcon = hci_connect(hdev, ACL_LINK, dst, l2cap_pi(sk)->sec_level, auth_type); @@ -1106,8 +1127,7 @@ static int l2cap_do_connect(struct sock *sk) if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { l2cap_sock_clear_timer(sk); - if (l2cap_check_security(sk)) - sk->sk_state = 
BT_CONNECTED; + sk->sk_state = BT_CONNECTED; } else l2cap_do_start(sk); } @@ -1873,8 +1893,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms if (pi->mode == L2CAP_MODE_STREAMING) { l2cap_streaming_send(sk); } else { - if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && - (pi->conn_state & L2CAP_CONN_WAIT_F)) { + if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && + pi->conn_state && L2CAP_CONN_WAIT_F) { err = len; break; } diff --git a/trunk/net/bluetooth/rfcomm/core.c b/trunk/net/bluetooth/rfcomm/core.c index 6b83776534fb..ff8aaa736650 100644 --- a/trunk/net/bluetooth/rfcomm/core.c +++ b/trunk/net/bluetooth/rfcomm/core.c @@ -1164,8 +1164,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) * initiator rfcomm_process_rx already calls * rfcomm_session_put() */ if (s->sock->sk->sk_state != BT_CLOSED) - if (list_empty(&s->dlcs)) - rfcomm_session_put(s); + rfcomm_session_put(s); break; } } diff --git a/trunk/net/bridge/br_if.c b/trunk/net/bridge/br_if.c index 2a6801d8b728..d9d1e2bac1d6 100644 --- a/trunk/net/bridge/br_if.c +++ b/trunk/net/bridge/br_if.c @@ -365,7 +365,7 @@ int br_min_mtu(const struct net_bridge *br) void br_features_recompute(struct net_bridge *br) { struct net_bridge_port *p; - u32 features, mask; + unsigned long features, mask; features = mask = br->feature_mask; if (list_empty(&br->port_list)) @@ -379,7 +379,7 @@ void br_features_recompute(struct net_bridge *br) } done: - br->dev->features = netdev_fix_features(br->dev, features); + br->dev->features = netdev_fix_features(features, NULL); } /* called with RTNL */ diff --git a/trunk/net/bridge/br_private.h b/trunk/net/bridge/br_private.h index 9f22898c5359..84aac7734bfc 100644 --- a/trunk/net/bridge/br_private.h +++ b/trunk/net/bridge/br_private.h @@ -182,7 +182,7 @@ struct net_bridge struct br_cpu_netstats __percpu *stats; spinlock_t hash_lock; struct hlist_head hash[BR_HASH_SIZE]; - u32 feature_mask; + unsigned long feature_mask; #ifdef CONFIG_BRIDGE_NETFILTER struct rtable fake_rtable; bool nf_call_iptables; diff --git a/trunk/net/bridge/netfilter/ebt_ip6.c b/trunk/net/bridge/netfilter/ebt_ip6.c index 2ed0056a39a8..50a46afc2bcc 100644 --- a/trunk/net/bridge/netfilter/ebt_ip6.c +++ b/trunk/net/bridge/netfilter/ebt_ip6.c @@ -22,15 +22,9 @@ #include #include -union pkthdr { - struct { - __be16 src; - __be16 dst; - } tcpudphdr; - struct { - u8 type; - u8 code; - } icmphdr; +struct tcpudphdr { + __be16 src; + __be16 dst; }; static bool @@ -39,8 +33,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par) const struct ebt_ip6_info *info = par->matchinfo; const struct ipv6hdr *ih6; struct ipv6hdr _ip6h; - const union pkthdr *pptr; - union pkthdr _pkthdr; + const struct tcpudphdr *pptr; + struct tcpudphdr _ports; ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h); if (ih6 == NULL) @@ -62,34 +56,26 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par) return false; if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO)) return false; - if (!(info->bitmask & ( EBT_IP6_DPORT | - EBT_IP6_SPORT | EBT_IP6_ICMP6))) + if (!(info->bitmask & EBT_IP6_DPORT) && + !(info->bitmask & EBT_IP6_SPORT)) return true; - - /* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok. 
*/ - pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr), - &_pkthdr); + pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports), + &_ports); if (pptr == NULL) return false; if (info->bitmask & EBT_IP6_DPORT) { - u16 dst = ntohs(pptr->tcpudphdr.dst); + u32 dst = ntohs(pptr->dst); if (FWINV(dst < info->dport[0] || dst > info->dport[1], EBT_IP6_DPORT)) return false; } if (info->bitmask & EBT_IP6_SPORT) { - u16 src = ntohs(pptr->tcpudphdr.src); + u32 src = ntohs(pptr->src); if (FWINV(src < info->sport[0] || src > info->sport[1], EBT_IP6_SPORT)) return false; } - if ((info->bitmask & EBT_IP6_ICMP6) && - FWINV(pptr->icmphdr.type < info->icmpv6_type[0] || - pptr->icmphdr.type > info->icmpv6_type[1] || - pptr->icmphdr.code < info->icmpv6_code[0] || - pptr->icmphdr.code > info->icmpv6_code[1], - EBT_IP6_ICMP6)) - return false; + return true; } return true; } @@ -117,14 +103,6 @@ static int ebt_ip6_mt_check(const struct xt_mtchk_param *par) return -EINVAL; if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1]) return -EINVAL; - if (info->bitmask & EBT_IP6_ICMP6) { - if ((info->invflags & EBT_IP6_PROTO) || - info->protocol != IPPROTO_ICMPV6) - return -EINVAL; - if (info->icmpv6_type[0] > info->icmpv6_type[1] || - info->icmpv6_code[0] > info->icmpv6_code[1]) - return -EINVAL; - } return 0; } diff --git a/trunk/net/bridge/netfilter/ebtables.c b/trunk/net/bridge/netfilter/ebtables.c index 5f1825df9dca..16df0532d4b9 100644 --- a/trunk/net/bridge/netfilter/ebtables.c +++ b/trunk/net/bridge/netfilter/ebtables.c @@ -1764,7 +1764,6 @@ static int compat_table_info(const struct ebt_table_info *info, newinfo->entries_size = size; - xt_compat_init_offsets(AF_INET, info->nentries); return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); } diff --git a/trunk/net/caif/cfcnfg.c b/trunk/net/caif/cfcnfg.c index f1f98d967d8a..c665de778b60 100644 --- a/trunk/net/caif/cfcnfg.c +++ b/trunk/net/caif/cfcnfg.c @@ -23,8 +23,10 @@ #include #define MAX_PHY_LAYERS 7 +#define PHY_NAME_LEN 20 #define container_obj(layr) container_of(layr, struct cfcnfg, layer) +#define RFM_FRAGMENT_SIZE 4030 /* Information about CAIF physical interfaces held by Config Module in order * to manage physical interfaces diff --git a/trunk/net/caif/cfdgml.c b/trunk/net/caif/cfdgml.c index 27dab26ad3b8..d3ed264ad6c4 100644 --- a/trunk/net/caif/cfdgml.c +++ b/trunk/net/caif/cfdgml.c @@ -18,6 +18,7 @@ #define DGM_CMD_BIT 0x80 #define DGM_FLOW_OFF 0x81 #define DGM_FLOW_ON 0x80 +#define DGM_CTRL_PKT_SIZE 1 #define DGM_MTU 1500 static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt); diff --git a/trunk/net/caif/cfserl.c b/trunk/net/caif/cfserl.c index 8303fe3ebf89..9297f7dea9d8 100644 --- a/trunk/net/caif/cfserl.c +++ b/trunk/net/caif/cfserl.c @@ -25,6 +25,7 @@ struct cfserl { spinlock_t sync; bool usestx; }; +#define STXLEN(layr) (layr->usestx ? 
1 : 0) static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); diff --git a/trunk/net/caif/cfutill.c b/trunk/net/caif/cfutill.c index 315c0d601368..efad410e4c82 100644 --- a/trunk/net/caif/cfutill.c +++ b/trunk/net/caif/cfutill.c @@ -20,7 +20,7 @@ #define UTIL_REMOTE_SHUTDOWN 0x82 #define UTIL_FLOW_OFF 0x81 #define UTIL_FLOW_ON 0x80 - +#define UTIL_CTRL_PKT_SIZE 1 static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt); diff --git a/trunk/net/caif/cfveil.c b/trunk/net/caif/cfveil.c index c3b1dec4acf6..3b425b189a99 100644 --- a/trunk/net/caif/cfveil.c +++ b/trunk/net/caif/cfveil.c @@ -17,7 +17,7 @@ #define VEI_FLOW_OFF 0x81 #define VEI_FLOW_ON 0x80 #define VEI_SET_PIN 0x82 - +#define VEI_CTRL_PKT_SIZE 1 #define container_obj(layr) container_of(layr, struct cfsrvl, layer) static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt); diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index 9109e2648d4d..7c6a46f80372 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -132,7 +132,6 @@ #include #include #include -#include #include "net-sysfs.h" @@ -750,8 +749,7 @@ EXPORT_SYMBOL(dev_get_by_index); * @ha: hardware address * * Search for an interface by MAC address. Returns NULL if the device - * is not found or a pointer to the device. - * The caller must hold RCU or RTNL. + * is not found or a pointer to the device. The caller must hold RCU * The returned device has not had its ref count increased * and the caller must therefore be careful about locking * @@ -1287,7 +1285,7 @@ static int __dev_close(struct net_device *dev) return __dev_close_many(&single); } -static int dev_close_many(struct list_head *head) +int dev_close_many(struct list_head *head) { struct net_device *dev, *tmp; LIST_HEAD(tmp_list); @@ -1595,48 +1593,6 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) rcu_read_unlock(); } -/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change - * @dev: Network device - * @txq: number of queues available - * - * If real_num_tx_queues is changed the tc mappings may no longer be - * valid. To resolve this verify the tc mapping remains valid and if - * not NULL the mapping. With no priorities mapping to this - * offset/count pair it will no longer be used. In the worst case TC0 - * is invalid nothing can be done so disable priority mappings. If is - * expected that drivers will fix this mapping if they can before - * calling netif_set_real_num_tx_queues. - */ -static void netif_setup_tc(struct net_device *dev, unsigned int txq) -{ - int i; - struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; - - /* If TC0 is invalidated disable TC mapping */ - if (tc->offset + tc->count > txq) { - pr_warning("Number of in use tx queues changed " - "invalidating tc mappings. Priority " - "traffic classification disabled!\n"); - dev->num_tc = 0; - return; - } - - /* Invalidated prio to tc mappings set to TC0 */ - for (i = 1; i < TC_BITMASK + 1; i++) { - int q = netdev_get_prio_tc_map(dev, i); - - tc = &dev->tc_to_txq[q]; - if (tc->offset + tc->count > txq) { - pr_warning("Number of in use tx queues " - "changed. Priority %i to tc " - "mapping %i is no longer valid " - "setting map to 0\n", - i, q); - netdev_set_prio_tc_map(dev, i, 0); - } - } -} - /* * Routine to help set real_num_tx_queues. 
To avoid skbs mapped to queues * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. @@ -1656,9 +1612,6 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) if (rc) return rc; - if (dev->num_tc) - netif_setup_tc(dev, txq); - if (txq < dev->real_num_tx_queues) qdisc_reset_all_tx_gt(dev, txq); } @@ -1858,7 +1811,7 @@ EXPORT_SYMBOL(skb_checksum_help); * It may return NULL if the skb requires no segmentation. This is * only possible when GSO is used for verifying header integrity. */ -struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features) +struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_type *ptype; @@ -2046,7 +1999,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol) protocol == htons(ETH_P_FCOE))); } -static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features) +static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features) { if (!can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; @@ -2058,10 +2011,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features return features; } -u32 netif_skb_features(struct sk_buff *skb) +int netif_skb_features(struct sk_buff *skb) { __be16 protocol = skb->protocol; - u32 features = skb->dev->features; + int features = skb->dev->features; if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; @@ -2106,7 +2059,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, int rc = NETDEV_TX_OK; if (likely(!skb->next)) { - u32 features; + int features; /* * If device doesnt need skb->dst, release it right now while @@ -2208,8 +2161,6 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, unsigned int num_tx_queues) { u32 hash; - u16 qoffset = 0; - u16 qcount = num_tx_queues; if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); @@ -2218,19 +2169,13 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, return hash; } - if (dev->num_tc) { - u8 tc = netdev_get_prio_tc_map(dev, skb->priority); - qoffset = dev->tc_to_txq[tc].offset; - qcount = dev->tc_to_txq[tc].count; - } - if (skb->sk && skb->sk->sk_hash) hash = skb->sk->sk_hash; else hash = (__force u16) skb->protocol ^ skb->rxhash; hash = jhash_1word(hash, hashrnd); - return (u16) (((u64) hash * qcount) >> 32) + qoffset; + return (u16) (((u64) hash * num_tx_queues) >> 32); } EXPORT_SYMBOL(__skb_tx_hash); @@ -2327,18 +2272,15 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, struct netdev_queue *txq) { spinlock_t *root_lock = qdisc_lock(q); - bool contended; + bool contended = qdisc_is_running(q); int rc; - qdisc_skb_cb(skb)->pkt_len = skb->len; - qdisc_calculate_pkt_len(skb, q); /* * Heuristic to force contended enqueues to serialize on a * separate lock before trying to get qdisc main lock. * This permits __QDISC_STATE_RUNNING owner to get the lock more often * and dequeue packets faster. 
*/ - contended = qdisc_is_running(q); if (unlikely(contended)) spin_lock(&q->busylock); @@ -2356,6 +2298,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) skb_dst_force(skb); + qdisc_skb_cb(skb)->pkt_len = skb->len; qdisc_bstats_update(q, skb); if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { @@ -2370,7 +2313,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, rc = NET_XMIT_SUCCESS; } else { skb_dst_force(skb); - rc = q->enqueue(skb, q) & NET_XMIT_MASK; + rc = qdisc_enqueue_root(skb, q); if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); @@ -2589,53 +2532,6 @@ EXPORT_SYMBOL(__skb_get_rxhash); struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; EXPORT_SYMBOL(rps_sock_flow_table); -static struct rps_dev_flow * -set_rps_cpu(struct net_device *dev, struct sk_buff *skb, - struct rps_dev_flow *rflow, u16 next_cpu) -{ - u16 tcpu; - - tcpu = rflow->cpu = next_cpu; - if (tcpu != RPS_NO_CPU) { -#ifdef CONFIG_RFS_ACCEL - struct netdev_rx_queue *rxqueue; - struct rps_dev_flow_table *flow_table; - struct rps_dev_flow *old_rflow; - u32 flow_id; - u16 rxq_index; - int rc; - - /* Should we steer this flow to a different hardware queue? */ - if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap) - goto out; - rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); - if (rxq_index == skb_get_rx_queue(skb)) - goto out; - - rxqueue = dev->_rx + rxq_index; - flow_table = rcu_dereference(rxqueue->rps_flow_table); - if (!flow_table) - goto out; - flow_id = skb->rxhash & flow_table->mask; - rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, - rxq_index, flow_id); - if (rc < 0) - goto out; - old_rflow = rflow; - rflow = &flow_table->flows[flow_id]; - rflow->cpu = next_cpu; - rflow->filter = rc; - if (old_rflow->filter == rflow->filter) - old_rflow->filter = RPS_NO_FILTER; - out: -#endif - rflow->last_qtail = - per_cpu(softnet_data, tcpu).input_queue_head; - } - - return rflow; -} - /* * get_rps_cpu is called from netif_receive_skb and returns the target * CPU from the RPS map of the receiving queue for a given skb. @@ -2706,9 +2602,12 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, if (unlikely(tcpu != next_cpu) && (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - - rflow->last_qtail)) >= 0)) - rflow = set_rps_cpu(dev, skb, rflow, next_cpu); - + rflow->last_qtail)) >= 0)) { + tcpu = rflow->cpu = next_cpu; + if (tcpu != RPS_NO_CPU) + rflow->last_qtail = per_cpu(softnet_data, + tcpu).input_queue_head; + } if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { *rflowp = rflow; cpu = tcpu; @@ -2729,46 +2628,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, return cpu; } -#ifdef CONFIG_RFS_ACCEL - -/** - * rps_may_expire_flow - check whether an RFS hardware filter may be removed - * @dev: Device on which the filter was set - * @rxq_index: RX queue index - * @flow_id: Flow ID passed to ndo_rx_flow_steer() - * @filter_id: Filter ID returned by ndo_rx_flow_steer() - * - * Drivers that implement ndo_rx_flow_steer() should periodically call - * this function for each installed filter and remove the filters for - * which it returns %true. 
- */ -bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, - u32 flow_id, u16 filter_id) -{ - struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; - struct rps_dev_flow_table *flow_table; - struct rps_dev_flow *rflow; - bool expire = true; - int cpu; - - rcu_read_lock(); - flow_table = rcu_dereference(rxqueue->rps_flow_table); - if (flow_table && flow_id <= flow_table->mask) { - rflow = &flow_table->flows[flow_id]; - cpu = ACCESS_ONCE(rflow->cpu); - if (rflow->filter == filter_id && cpu != RPS_NO_CPU && - ((int)(per_cpu(softnet_data, cpu).input_queue_head - - rflow->last_qtail) < - (int)(10 * flow_table->mask))) - expire = false; - } - rcu_read_unlock(); - return expire; -} -EXPORT_SYMBOL(rps_may_expire_flow); - -#endif /* CONFIG_RFS_ACCEL */ - /* Called from hardirq (IPI) context */ static void rps_trigger_softirq(void *data) { @@ -3564,7 +3423,6 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) __skb_pull(skb, skb_headlen(skb)); skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); skb->vlan_tci = 0; - skb->dev = napi->dev; napi->skb = skb; } @@ -4052,15 +3910,12 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos) void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct net_device *dev = v; - - if (v == SEQ_START_TOKEN) - dev = first_net_device_rcu(seq_file_net(seq)); - else - dev = next_net_device_rcu(dev); + struct net_device *dev = (v == SEQ_START_TOKEN) ? + first_net_device(seq_file_net(seq)) : + next_net_device((struct net_device *)v); ++*pos; - return dev; + return rcu_dereference(dev); } void dev_seq_stop(struct seq_file *seq, void *v) @@ -4716,17 +4571,6 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) } EXPORT_SYMBOL(dev_set_mtu); -/** - * dev_set_group - Change group this device belongs to - * @dev: device - * @new_group: group this device should belong to - */ -void dev_set_group(struct net_device *dev, int new_group) -{ - dev->group = new_group; -} -EXPORT_SYMBOL(dev_set_group); - /** * dev_set_mac_address - Change Media Access Control Address * @dev: device @@ -5217,49 +5061,41 @@ static void rollback_registered(struct net_device *dev) rollback_registered_many(&single); } -u32 netdev_fix_features(struct net_device *dev, u32 features) +unsigned long netdev_fix_features(unsigned long features, const char *name) { - /* Fix illegal checksum combinations */ - if ((features & NETIF_F_HW_CSUM) && - (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_info(dev, "mixed HW and IP checksum settings.\n"); - features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); - } - - if ((features & NETIF_F_NO_CSUM) && - (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_info(dev, "mixed no checksumming and other settings.\n"); - features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); - } - /* Fix illegal SG+CSUM combinations. */ if ((features & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM)) { - netdev_info(dev, - "Dropping NETIF_F_SG since no checksum feature.\n"); + if (name) + printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no " + "checksum feature.\n", name); features &= ~NETIF_F_SG; } /* TSO requires that SG is present as well. */ if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { - netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); + if (name) + printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no " + "SG feature.\n", name); features &= ~NETIF_F_TSO; } - /* UFO needs SG and checksumming */ if (features & NETIF_F_UFO) { /* maybe split UFO into V4 and V6? 
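The rps_may_expire_flow() helper removed above is what drivers implementing ndo_rx_flow_steer() were expected to poll so that stale hardware steering filters get torn down. Below is a rough sketch of such a periodic scan; the filter table, its fields and the hardware-removal helper are hypothetical driver-side details, and the table index is assumed to be the filter ID the driver returned from ndo_rx_flow_steer().

#include <linux/netdevice.h>

/* Hypothetical per-driver bookkeeping; only rps_may_expire_flow() is core API. */
struct example_steer_filter {
	bool	inserted;
	u16	rxq_index;	/* queue index passed to ndo_rx_flow_steer() */
	u32	flow_id;	/* flow_id passed to ndo_rx_flow_steer() */
};

static void example_remove_hw_filter(struct net_device *dev, u16 filter_id)
{
	/* device-specific teardown would go here */
}

static void example_expire_steer_filters(struct net_device *dev,
					 struct example_steer_filter *tbl,
					 u16 count)
{
	u16 id;

	for (id = 0; id < count; id++) {
		if (!tbl[id].inserted)
			continue;
		if (rps_may_expire_flow(dev, tbl[id].rxq_index,
					tbl[id].flow_id, id)) {
			example_remove_hw_filter(dev, id);
			tbl[id].inserted = false;
		}
	}
}

Drivers would typically run a scan like this from a periodic work item alongside their other service tasks.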
*/ if (!((features & NETIF_F_GEN_CSUM) || (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_info(dev, - "Dropping NETIF_F_UFO since no checksum offload features.\n"); + if (name) + printk(KERN_ERR "%s: Dropping NETIF_F_UFO " + "since no checksum offload features.\n", + name); features &= ~NETIF_F_UFO; } if (!(features & NETIF_F_SG)) { - netdev_info(dev, - "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); + if (name) + printk(KERN_ERR "%s: Dropping NETIF_F_UFO " + "since no NETIF_F_SG feature.\n", name); features &= ~NETIF_F_UFO; } } @@ -5402,7 +5238,22 @@ int register_netdevice(struct net_device *dev) if (dev->iflink == -1) dev->iflink = dev->ifindex; - dev->features = netdev_fix_features(dev, dev->features); + /* Fix illegal checksum combinations */ + if ((dev->features & NETIF_F_HW_CSUM) && + (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { + printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n", + dev->name); + dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + } + + if ((dev->features & NETIF_F_NO_CSUM) && + (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { + printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n", + dev->name); + dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); + } + + dev->features = netdev_fix_features(dev->features, dev->name); /* Enable software GSO if SG is supported. */ if (dev->features & NETIF_F_SG) @@ -5827,7 +5678,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev->priv_flags = IFF_XMIT_DST_RELEASE; setup(dev); strcpy(dev->name, name); - dev->group = INIT_NETDEV_GROUP; return dev; free_pcpu: @@ -6138,7 +5988,8 @@ static int dev_cpu_callback(struct notifier_block *nfb, * @one to the master device with current feature set @all. Will not * enable anything that is off in @mask. Returns the new feature set. */ -u32 netdev_increment_features(u32 all, u32 one, u32 mask) +unsigned long netdev_increment_features(unsigned long all, unsigned long one, + unsigned long mask) { /* If device needs checksumming, downgrade to it. */ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) diff --git a/trunk/net/core/dst.c b/trunk/net/core/dst.c index c1674fde827d..b99c7c7ffce2 100644 --- a/trunk/net/core/dst.c +++ b/trunk/net/core/dst.c @@ -164,8 +164,6 @@ int dst_discard(struct sk_buff *skb) } EXPORT_SYMBOL(dst_discard); -const u32 dst_default_metrics[RTAX_MAX]; - void *dst_alloc(struct dst_ops *ops) { struct dst_entry *dst; @@ -182,7 +180,6 @@ void *dst_alloc(struct dst_ops *ops) dst->lastuse = jiffies; dst->path = dst; dst->input = dst->output = dst_discard; - dst_init_metrics(dst, dst_default_metrics, true); #if RT_CACHE_DEBUG >= 2 atomic_inc(&dst_total); #endif @@ -285,42 +282,6 @@ void dst_release(struct dst_entry *dst) } EXPORT_SYMBOL(dst_release); -u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) -{ - u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); - - if (p) { - u32 *old_p = __DST_METRICS_PTR(old); - unsigned long prev, new; - - memcpy(p, old_p, sizeof(u32) * RTAX_MAX); - - new = (unsigned long) p; - prev = cmpxchg(&dst->_metrics, old, new); - - if (prev != old) { - kfree(p); - p = __DST_METRICS_PTR(prev); - if (prev & DST_METRICS_READ_ONLY) - p = NULL; - } - } - return p; -} -EXPORT_SYMBOL(dst_cow_metrics_generic); - -/* Caller asserts that dst_metrics_read_only(dst) is false. 
*/ -void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) -{ - unsigned long prev, new; - - new = (unsigned long) dst_default_metrics; - prev = cmpxchg(&dst->_metrics, old, new); - if (prev == old) - kfree(__DST_METRICS_PTR(old)); -} -EXPORT_SYMBOL(__dst_destroy_metrics_generic); - /** * skb_dst_set_noref - sets skb dst, without a reference * @skb: buffer diff --git a/trunk/net/core/ethtool.c b/trunk/net/core/ethtool.c index 5984ee0c7136..17741782a345 100644 --- a/trunk/net/core/ethtool.c +++ b/trunk/net/core/ethtool.c @@ -817,7 +817,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) if (regs.len > reglen) regs.len = reglen; - regbuf = vzalloc(reglen); + regbuf = vmalloc(reglen); if (!regbuf) return -ENOMEM; @@ -1458,7 +1458,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) void __user *useraddr = ifr->ifr_data; u32 ethcmd; int rc; - u32 old_features; + unsigned long old_features; if (!dev || !netif_device_present(dev)) return -ENODEV; diff --git a/trunk/net/core/filter.c b/trunk/net/core/filter.c index 232b1873bb28..afc58374ca96 100644 --- a/trunk/net/core/filter.c +++ b/trunk/net/core/filter.c @@ -142,14 +142,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) if (err) return err; - rcu_read_lock(); - filter = rcu_dereference(sk->sk_filter); + rcu_read_lock_bh(); + filter = rcu_dereference_bh(sk->sk_filter); if (filter) { unsigned int pkt_len = sk_run_filter(skb, filter->insns); err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; } - rcu_read_unlock(); + rcu_read_unlock_bh(); return err; } diff --git a/trunk/net/core/neighbour.c b/trunk/net/core/neighbour.c index 799f06e03a22..60a902913429 100644 --- a/trunk/net/core/neighbour.c +++ b/trunk/net/core/neighbour.c @@ -316,7 +316,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries) { size_t size = entries * sizeof(struct neighbour *); struct neigh_hash_table *ret; - struct neighbour __rcu **buckets; + struct neighbour **buckets; ret = kmalloc(sizeof(*ret), GFP_ATOMIC); if (!ret) @@ -324,14 +324,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries) if (size <= PAGE_SIZE) buckets = kzalloc(size, GFP_ATOMIC); else - buckets = (struct neighbour __rcu **) + buckets = (struct neighbour **) __get_free_pages(GFP_ATOMIC | __GFP_ZERO, get_order(size)); if (!buckets) { kfree(ret); return NULL; } - ret->hash_buckets = buckets; + rcu_assign_pointer(ret->hash_buckets, buckets); ret->hash_mask = entries - 1; get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd)); return ret; @@ -343,7 +343,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head) struct neigh_hash_table, rcu); size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *); - struct neighbour __rcu **buckets = nht->hash_buckets; + struct neighbour **buckets = nht->hash_buckets; if (size <= PAGE_SIZE) kfree(buckets); @@ -1540,7 +1540,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) panic("cannot create neighbour proc dir entry"); #endif - RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8)); + tbl->nht = neigh_hash_alloc(8); phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *); tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL); @@ -1602,8 +1602,7 @@ int neigh_table_clear(struct neigh_table *tbl) } write_unlock(&neigh_tbl_lock); - call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu, - neigh_hash_free_rcu); + call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu); tbl->nht = NULL; kfree(tbl->phash_buckets); diff --git a/trunk/net/core/net-sysfs.c 
b/trunk/net/core/net-sysfs.c index 2e4a393dfc3b..e23c01be5a5b 100644 --- a/trunk/net/core/net-sysfs.c +++ b/trunk/net/core/net-sysfs.c @@ -99,7 +99,7 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec); NETDEVICE_SHOW(addr_len, fmt_dec); NETDEVICE_SHOW(iflink, fmt_dec); NETDEVICE_SHOW(ifindex, fmt_dec); -NETDEVICE_SHOW(features, fmt_hex); +NETDEVICE_SHOW(features, fmt_long_hex); NETDEVICE_SHOW(type, fmt_dec); NETDEVICE_SHOW(link_mode, fmt_dec); @@ -295,20 +295,6 @@ static ssize_t show_ifalias(struct device *dev, return ret; } -NETDEVICE_SHOW(group, fmt_dec); - -static int change_group(struct net_device *net, unsigned long new_group) -{ - dev_set_group(net, (int) new_group); - return 0; -} - -static ssize_t store_group(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) -{ - return netdev_store(dev, attr, buf, len, change_group); -} - static struct device_attribute net_class_attributes[] = { __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL), __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), @@ -330,7 +316,6 @@ static struct device_attribute net_class_attributes[] = { __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags), __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, store_tx_queue_len), - __ATTR(group, S_IRUGO | S_IWUSR, show_group, store_group), {} }; diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c index d73b77adb676..a9e7fc4c461f 100644 --- a/trunk/net/core/pktgen.c +++ b/trunk/net/core/pktgen.c @@ -251,7 +251,6 @@ struct pktgen_dev { int max_pkt_size; /* = ETH_ZLEN; */ int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ int nfrags; - struct page *page; u64 delay; /* nano-seconds */ __u64 count; /* Default No packets to send */ @@ -1135,10 +1134,6 @@ static ssize_t pktgen_if_write(struct file *file, if (node_possible(value)) { pkt_dev->node = value; sprintf(pg_result, "OK: node=%d", pkt_dev->node); - if (pkt_dev->page) { - put_page(pkt_dev->page); - pkt_dev->page = NULL; - } } else sprintf(pg_result, "ERROR: node not possible"); @@ -2610,90 +2605,6 @@ static inline __be16 build_tci(unsigned int id, unsigned int cfi, return htons(id | (cfi << 12) | (prio << 13)); } -static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, - int datalen) -{ - struct timeval timestamp; - struct pktgen_hdr *pgh; - - pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh)); - datalen -= sizeof(*pgh); - - if (pkt_dev->nfrags <= 0) { - pgh = (struct pktgen_hdr *)skb_put(skb, datalen); - memset(pgh + 1, 0, datalen); - } else { - int frags = pkt_dev->nfrags; - int i, len; - - - if (frags > MAX_SKB_FRAGS) - frags = MAX_SKB_FRAGS; - len = datalen - frags * PAGE_SIZE; - if (len > 0) { - memset(skb_put(skb, len), 0, len); - datalen = frags * PAGE_SIZE; - } - - i = 0; - while (datalen > 0) { - if (unlikely(!pkt_dev->page)) { - int node = numa_node_id(); - - if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE)) - node = pkt_dev->node; - pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); - if (!pkt_dev->page) - break; - } - skb_shinfo(skb)->frags[i].page = pkt_dev->page; - get_page(pkt_dev->page); - skb_shinfo(skb)->frags[i].page_offset = 0; - skb_shinfo(skb)->frags[i].size = - (datalen < PAGE_SIZE ? 
datalen : PAGE_SIZE); - datalen -= skb_shinfo(skb)->frags[i].size; - skb->len += skb_shinfo(skb)->frags[i].size; - skb->data_len += skb_shinfo(skb)->frags[i].size; - i++; - skb_shinfo(skb)->nr_frags = i; - } - - while (i < frags) { - int rem; - - if (i == 0) - break; - - rem = skb_shinfo(skb)->frags[i - 1].size / 2; - if (rem == 0) - break; - - skb_shinfo(skb)->frags[i - 1].size -= rem; - - skb_shinfo(skb)->frags[i] = - skb_shinfo(skb)->frags[i - 1]; - get_page(skb_shinfo(skb)->frags[i].page); - skb_shinfo(skb)->frags[i].page = - skb_shinfo(skb)->frags[i - 1].page; - skb_shinfo(skb)->frags[i].page_offset += - skb_shinfo(skb)->frags[i - 1].size; - skb_shinfo(skb)->frags[i].size = rem; - i++; - skb_shinfo(skb)->nr_frags = i; - } - } - - /* Stamp the time, and sequence number, - * convert them to network byte order - */ - pgh->pgh_magic = htonl(PKTGEN_MAGIC); - pgh->seq_num = htonl(pkt_dev->seq_num); - - do_gettimeofday(×tamp); - pgh->tv_sec = htonl(timestamp.tv_sec); - pgh->tv_usec = htonl(timestamp.tv_usec); -} - static struct sk_buff *fill_packet_ipv4(struct net_device *odev, struct pktgen_dev *pkt_dev) { @@ -2702,6 +2613,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, struct udphdr *udph; int datalen, iplen; struct iphdr *iph; + struct pktgen_hdr *pgh = NULL; __be16 protocol = htons(ETH_P_IP); __be32 *mpls; __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ @@ -2817,7 +2729,76 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, pkt_dev->pkt_overhead); skb->dev = odev; skb->pkt_type = PACKET_HOST; - pktgen_finalize_skb(pkt_dev, skb, datalen); + + if (pkt_dev->nfrags <= 0) { + pgh = (struct pktgen_hdr *)skb_put(skb, datalen); + memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr)); + } else { + int frags = pkt_dev->nfrags; + int i, len; + + pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); + + if (frags > MAX_SKB_FRAGS) + frags = MAX_SKB_FRAGS; + if (datalen > frags * PAGE_SIZE) { + len = datalen - frags * PAGE_SIZE; + memset(skb_put(skb, len), 0, len); + datalen = frags * PAGE_SIZE; + } + + i = 0; + while (datalen > 0) { + struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); + skb_shinfo(skb)->frags[i].page = page; + skb_shinfo(skb)->frags[i].page_offset = 0; + skb_shinfo(skb)->frags[i].size = + (datalen < PAGE_SIZE ? 
datalen : PAGE_SIZE); + datalen -= skb_shinfo(skb)->frags[i].size; + skb->len += skb_shinfo(skb)->frags[i].size; + skb->data_len += skb_shinfo(skb)->frags[i].size; + i++; + skb_shinfo(skb)->nr_frags = i; + } + + while (i < frags) { + int rem; + + if (i == 0) + break; + + rem = skb_shinfo(skb)->frags[i - 1].size / 2; + if (rem == 0) + break; + + skb_shinfo(skb)->frags[i - 1].size -= rem; + + skb_shinfo(skb)->frags[i] = + skb_shinfo(skb)->frags[i - 1]; + get_page(skb_shinfo(skb)->frags[i].page); + skb_shinfo(skb)->frags[i].page = + skb_shinfo(skb)->frags[i - 1].page; + skb_shinfo(skb)->frags[i].page_offset += + skb_shinfo(skb)->frags[i - 1].size; + skb_shinfo(skb)->frags[i].size = rem; + i++; + skb_shinfo(skb)->nr_frags = i; + } + } + + /* Stamp the time, and sequence number, + * convert them to network byte order + */ + if (pgh) { + struct timeval timestamp; + + pgh->pgh_magic = htonl(PKTGEN_MAGIC); + pgh->seq_num = htonl(pkt_dev->seq_num); + + do_gettimeofday(×tamp); + pgh->tv_sec = htonl(timestamp.tv_sec); + pgh->tv_usec = htonl(timestamp.tv_usec); + } #ifdef CONFIG_XFRM if (!process_ipsec(pkt_dev, skb, protocol)) @@ -2999,6 +2980,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, struct udphdr *udph; int datalen; struct ipv6hdr *iph; + struct pktgen_hdr *pgh = NULL; __be16 protocol = htons(ETH_P_IPV6); __be32 *mpls; __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ @@ -3101,7 +3083,75 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->dev = odev; skb->pkt_type = PACKET_HOST; - pktgen_finalize_skb(pkt_dev, skb, datalen); + if (pkt_dev->nfrags <= 0) + pgh = (struct pktgen_hdr *)skb_put(skb, datalen); + else { + int frags = pkt_dev->nfrags; + int i; + + pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); + + if (frags > MAX_SKB_FRAGS) + frags = MAX_SKB_FRAGS; + if (datalen > frags * PAGE_SIZE) { + skb_put(skb, datalen - frags * PAGE_SIZE); + datalen = frags * PAGE_SIZE; + } + + i = 0; + while (datalen > 0) { + struct page *page = alloc_pages(GFP_KERNEL, 0); + skb_shinfo(skb)->frags[i].page = page; + skb_shinfo(skb)->frags[i].page_offset = 0; + skb_shinfo(skb)->frags[i].size = + (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); + datalen -= skb_shinfo(skb)->frags[i].size; + skb->len += skb_shinfo(skb)->frags[i].size; + skb->data_len += skb_shinfo(skb)->frags[i].size; + i++; + skb_shinfo(skb)->nr_frags = i; + } + + while (i < frags) { + int rem; + + if (i == 0) + break; + + rem = skb_shinfo(skb)->frags[i - 1].size / 2; + if (rem == 0) + break; + + skb_shinfo(skb)->frags[i - 1].size -= rem; + + skb_shinfo(skb)->frags[i] = + skb_shinfo(skb)->frags[i - 1]; + get_page(skb_shinfo(skb)->frags[i].page); + skb_shinfo(skb)->frags[i].page = + skb_shinfo(skb)->frags[i - 1].page; + skb_shinfo(skb)->frags[i].page_offset += + skb_shinfo(skb)->frags[i - 1].size; + skb_shinfo(skb)->frags[i].size = rem; + i++; + skb_shinfo(skb)->nr_frags = i; + } + } + + /* Stamp the time, and sequence number, + * convert them to network byte order + * should we update cloned packets too ? + */ + if (pgh) { + struct timeval timestamp; + + pgh->pgh_magic = htonl(PKTGEN_MAGIC); + pgh->seq_num = htonl(pkt_dev->seq_num); + + do_gettimeofday(×tamp); + pgh->tv_sec = htonl(timestamp.tv_sec); + pgh->tv_usec = htonl(timestamp.tv_usec); + } + /* pkt_dev->seq_num++; FF: you really mean this? 
*/ return skb; } @@ -3834,8 +3884,6 @@ static int pktgen_remove_device(struct pktgen_thread *t, free_SAs(pkt_dev); #endif vfree(pkt_dev->flows); - if (pkt_dev->page) - put_page(pkt_dev->page); kfree(pkt_dev); return 0; } diff --git a/trunk/net/core/rtnetlink.c b/trunk/net/core/rtnetlink.c index da0fe457c858..750db57f3bb3 100644 --- a/trunk/net/core/rtnetlink.c +++ b/trunk/net/core/rtnetlink.c @@ -868,7 +868,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, netif_running(dev) ? dev->operstate : IF_OPER_DOWN); NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - NLA_PUT_U32(skb, IFLA_GROUP, dev->group); if (dev->ifindex != dev->iflink) NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); @@ -1122,7 +1121,8 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) return -EOPNOTSUPP; if (af_ops->validate_link_af) { - err = af_ops->validate_link_af(dev, af); + err = af_ops->validate_link_af(dev, + tb[IFLA_AF_SPEC]); if (err < 0) return err; } @@ -1265,11 +1265,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, modified = 1; } - if (tb[IFLA_GROUP]) { - dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); - modified = 1; - } - /* * Interface selected by interface index but interface * name provided implies that a name change has been @@ -1547,8 +1542,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net, set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); if (tb[IFLA_LINKMODE]) dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); - if (tb[IFLA_GROUP]) - dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); return dev; @@ -1559,24 +1552,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net, } EXPORT_SYMBOL(rtnl_create_link); -static int rtnl_group_changelink(struct net *net, int group, - struct ifinfomsg *ifm, - struct nlattr **tb) -{ - struct net_device *dev; - int err; - - for_each_netdev(net, dev) { - if (dev->group == group) { - err = do_setlink(dev, ifm, tb, NULL, 0); - if (err < 0) - return err; - } - } - - return 0; -} - static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); @@ -1604,12 +1579,10 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(net, ifm->ifi_index); - else { - if (ifname[0]) - dev = __dev_get_by_name(net, ifname); - else - dev = NULL; - } + else if (ifname[0]) + dev = __dev_get_by_name(net, ifname); + else + dev = NULL; err = validate_linkmsg(dev, tb); if (err < 0) @@ -1673,13 +1646,8 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return do_setlink(dev, ifm, tb, ifname, modified); } - if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { - if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) - return rtnl_group_changelink(net, - nla_get_u32(tb[IFLA_GROUP]), - ifm, tb); + if (!(nlh->nlmsg_flags & NLM_F_CREATE)) return -ENODEV; - } if (ifm->ifi_index) return -EOPNOTSUPP; @@ -1704,9 +1672,6 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); dest_net = rtnl_link_get_net(net, tb); - if (IS_ERR(dest_net)) - return PTR_ERR(dest_net); - dev = rtnl_create_link(net, dest_net, ifname, ops, tb); if (IS_ERR(dev)) diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index 14cf560b4a3e..d31bb36ae0dc 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -210,7 +210,6 @@ struct sk_buff 
*__alloc_skb(unsigned int size, gfp_t gfp_mask, shinfo = skb_shinfo(skb); memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); atomic_set(&shinfo->dataref, 1); - kmemcheck_annotate_variable(shinfo->destructor_arg); if (fclone) { struct sk_buff *child = skb + 1; @@ -2498,7 +2497,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum); * a pointer to the first in a list of new skbs for the segments. * In case of error it returns ERR_PTR(err). */ -struct sk_buff *skb_segment(struct sk_buff *skb, u32 features) +struct sk_buff *skb_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = NULL; struct sk_buff *tail = NULL; @@ -2508,7 +2507,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features) unsigned int offset = doffset; unsigned int headroom; unsigned int len; - int sg = !!(features & NETIF_F_SG); + int sg = features & NETIF_F_SG; int nfrags = skb_shinfo(skb)->nr_frags; int err = -ENOMEM; int i = 0; @@ -2745,12 +2744,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) merge: if (offset > headlen) { - unsigned int eat = offset - headlen; - - skbinfo->frags[0].page_offset += eat; - skbinfo->frags[0].size -= eat; - skb->data_len -= eat; - skb->len -= eat; + skbinfo->frags[0].page_offset += offset - headlen; + skbinfo->frags[0].size -= offset - headlen; offset = headlen; } diff --git a/trunk/net/dcb/dcbnl.c b/trunk/net/dcb/dcbnl.c index 6b03f561caec..d900ab99814a 100644 --- a/trunk/net/dcb/dcbnl.c +++ b/trunk/net/dcb/dcbnl.c @@ -583,7 +583,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, u8 up, idtype; int ret = -EINVAL; - if (!tb[DCB_ATTR_APP]) + if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp) goto out; ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], @@ -604,16 +604,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, goto out; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); - - if (netdev->dcbnl_ops->getapp) { - up = netdev->dcbnl_ops->getapp(netdev, idtype, id); - } else { - struct dcb_app app = { - .selector = idtype, - .protocol = id, - }; - up = dcb_getapp(netdev, &app); - } + up = netdev->dcbnl_ops->getapp(netdev, idtype, id); /* send this back */ dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); diff --git a/trunk/net/decnet/dn_route.c b/trunk/net/decnet/dn_route.c index 42c9c62d3417..5e636365d33c 100644 --- a/trunk/net/decnet/dn_route.c +++ b/trunk/net/decnet/dn_route.c @@ -112,7 +112,6 @@ static int dn_dst_gc(struct dst_ops *ops); static struct dst_entry *dn_dst_check(struct dst_entry *, __u32); static unsigned int dn_dst_default_advmss(const struct dst_entry *dst); static unsigned int dn_dst_default_mtu(const struct dst_entry *dst); -static void dn_dst_destroy(struct dst_entry *); static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); static void dn_dst_link_failure(struct sk_buff *); static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu); @@ -134,18 +133,11 @@ static struct dst_ops dn_dst_ops = { .check = dn_dst_check, .default_advmss = dn_dst_default_advmss, .default_mtu = dn_dst_default_mtu, - .cow_metrics = dst_cow_metrics_generic, - .destroy = dn_dst_destroy, .negative_advice = dn_dst_negative_advice, .link_failure = dn_dst_link_failure, .update_pmtu = dn_dst_update_pmtu, }; -static void dn_dst_destroy(struct dst_entry *dst) -{ - dst_destroy_metrics_generic(dst); -} - static __inline__ unsigned dn_hash(__le16 src, __le16 dst) { __u16 tmp = (__u16 __force)(src ^ dst); @@ -822,14 +814,14 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct 
dn_fib_res *res) { struct dn_fib_info *fi = res->fi; struct net_device *dev = rt->dst.dev; - unsigned int mss_metric; struct neighbour *n; + unsigned int metric; if (fi) { if (DN_FIB_RES_GW(*res) && DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) rt->rt_gateway = DN_FIB_RES_GW(*res); - dst_init_metrics(&rt->dst, fi->fib_metrics, true); + dst_import_metrics(&rt->dst, fi->fib_metrics); } rt->rt_type = res->type; @@ -842,10 +834,10 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu); - mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS); - if (mss_metric) { + metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS); + if (metric) { unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); - if (mss_metric > mss) + if (metric > mss) dst_metric_set(&rt->dst, RTAX_ADVMSS, mss); } return 0; diff --git a/trunk/net/decnet/dn_table.c b/trunk/net/decnet/dn_table.c index b66600b3f4b5..f2abd3755690 100644 --- a/trunk/net/decnet/dn_table.c +++ b/trunk/net/decnet/dn_table.c @@ -59,6 +59,7 @@ struct dn_hash }; #define dz_key_0(key) ((key).datum = 0) +#define dz_prefix(key,dz) ((key).datum) #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) diff --git a/trunk/net/dsa/dsa.c b/trunk/net/dsa/dsa.c index 3fb14b7c13cf..0c877a74e1f4 100644 --- a/trunk/net/dsa/dsa.c +++ b/trunk/net/dsa/dsa.c @@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void) } module_exit(dsa_cleanup_module); -MODULE_AUTHOR("Lennert Buytenhek "); +MODULE_AUTHOR("Lennert Buytenhek ") MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:dsa"); diff --git a/trunk/net/econet/af_econet.c b/trunk/net/econet/af_econet.c index 0c2826337919..15dcc1a586b4 100644 --- a/trunk/net/econet/af_econet.c +++ b/trunk/net/econet/af_econet.c @@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result) static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { + struct sock *sk = sock->sk; struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name; struct net_device *dev; struct ec_addr addr; int err; unsigned char port, cb; #if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) - struct sock *sk = sock->sk; struct sk_buff *skb; struct ec_cb *eb; #endif @@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, error_free_buf: vfree(userbuf); -error: #else err = -EPROTOTYPE; #endif + error: mutex_unlock(&econet_mutex); return err; diff --git a/trunk/net/ipv4/Kconfig b/trunk/net/ipv4/Kconfig index 8949a05ac307..a5a1050595d1 100644 --- a/trunk/net/ipv4/Kconfig +++ b/trunk/net/ipv4/Kconfig @@ -140,9 +140,6 @@ config IP_ROUTE_VERBOSE handled by the klogd daemon which is responsible for kernel messages ("man klogd"). -config IP_ROUTE_CLASSID - bool - config IP_PNP bool "IP: kernel level autoconfiguration" help @@ -660,3 +657,4 @@ config TCP_MD5SIG on the Internet. If unsure, say N. 
+ diff --git a/trunk/net/ipv4/af_inet.c b/trunk/net/ipv4/af_inet.c index 7ceb80447631..f2b61107df6c 100644 --- a/trunk/net/ipv4/af_inet.c +++ b/trunk/net/ipv4/af_inet.c @@ -880,19 +880,6 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) } EXPORT_SYMBOL(inet_ioctl); -#ifdef CONFIG_COMPAT -int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) -{ - struct sock *sk = sock->sk; - int err = -ENOIOCTLCMD; - - if (sk->sk_prot->compat_ioctl) - err = sk->sk_prot->compat_ioctl(sk, cmd, arg); - - return err; -} -#endif - const struct proto_ops inet_stream_ops = { .family = PF_INET, .owner = THIS_MODULE, @@ -916,7 +903,6 @@ const struct proto_ops inet_stream_ops = { #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, - .compat_ioctl = inet_compat_ioctl, #endif }; EXPORT_SYMBOL(inet_stream_ops); @@ -943,7 +929,6 @@ const struct proto_ops inet_dgram_ops = { #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, - .compat_ioctl = inet_compat_ioctl, #endif }; EXPORT_SYMBOL(inet_dgram_ops); @@ -974,7 +959,6 @@ static const struct proto_ops inet_sockraw_ops = { #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, - .compat_ioctl = inet_compat_ioctl, #endif }; @@ -1231,7 +1215,7 @@ static int inet_gso_send_check(struct sk_buff *skb) return err; } -static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features) +static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct iphdr *iph; diff --git a/trunk/net/ipv4/arp.c b/trunk/net/ipv4/arp.c index 7927589813b5..04c8b69fd426 100644 --- a/trunk/net/ipv4/arp.c +++ b/trunk/net/ipv4/arp.c @@ -1017,13 +1017,14 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on) IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; return 0; } - if (__in_dev_get_rtnl(dev)) { - IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on); + if (__in_dev_get_rcu(dev)) { + IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on); return 0; } return -ENXIO; } +/* must be called with rcu_read_lock() */ static int arp_req_set_public(struct net *net, struct arpreq *r, struct net_device *dev) { @@ -1232,10 +1233,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg) if (!(r.arp_flags & ATF_NETMASK)) ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = htonl(0xFFFFFFFFUL); - rtnl_lock(); + rcu_read_lock(); if (r.arp_dev[0]) { err = -ENODEV; - dev = __dev_get_by_name(net, r.arp_dev); + dev = dev_get_by_name_rcu(net, r.arp_dev); if (dev == NULL) goto out; @@ -1262,7 +1263,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg) break; } out: - rtnl_unlock(); + rcu_read_unlock(); if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r))) err = -EFAULT; return err; diff --git a/trunk/net/ipv4/fib_rules.c b/trunk/net/ipv4/fib_rules.c index 9cefe72029cf..7981a24f5c7b 100644 --- a/trunk/net/ipv4/fib_rules.c +++ b/trunk/net/ipv4/fib_rules.c @@ -41,12 +41,12 @@ struct fib4_rule { __be32 srcmask; __be32 dst; __be32 dstmask; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE u32 tclassid; #endif }; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE u32 fib_rules_tclass(struct fib_result *res) { return res->r ? 
((struct fib4_rule *) res->r)->tclassid : 0; @@ -165,7 +165,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, if (frh->dst_len) rule4->dst = nla_get_be32(tb[FRA_DST]); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (tb[FRA_FLOW]) rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); #endif @@ -195,7 +195,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, if (frh->tos && (rule4->tos != frh->tos)) return 0; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW]))) return 0; #endif @@ -224,7 +224,7 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, if (rule4->src_len) NLA_PUT_BE32(skb, FRA_SRC, rule4->src); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (rule4->tclassid) NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid); #endif diff --git a/trunk/net/ipv4/fib_semantics.c b/trunk/net/ipv4/fib_semantics.c index 48e93a560077..12d3dc3df1b7 100644 --- a/trunk/net/ipv4/fib_semantics.c +++ b/trunk/net/ipv4/fib_semantics.c @@ -152,8 +152,6 @@ static void free_fib_info_rcu(struct rcu_head *head) { struct fib_info *fi = container_of(head, struct fib_info, rcu); - if (fi->fib_metrics != (u32 *) dst_default_metrics) - kfree(fi->fib_metrics); kfree(fi); } @@ -202,7 +200,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) #ifdef CONFIG_IP_ROUTE_MULTIPATH nh->nh_weight != onh->nh_weight || #endif -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE nh->nh_tclassid != onh->nh_tclassid || #endif ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD)) @@ -424,7 +422,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, nla = nla_find(attrs, attrlen, RTA_GATEWAY); nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE nla = nla_find(attrs, attrlen, RTA_FLOW); nexthop_nh->nh_tclassid = nla ? 
nla_get_u32(nla) : 0; #endif @@ -478,7 +476,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) nla = nla_find(attrs, attrlen, RTA_GATEWAY); if (nla && nla_get_be32(nla) != nh->nh_gw) return 1; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE nla = nla_find(attrs, attrlen, RTA_FLOW); if (nla && nla_get_u32(nla) != nh->nh_tclassid) return 1; @@ -744,12 +742,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg) fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); if (fi == NULL) goto failure; - if (cfg->fc_mx) { - fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); - if (!fi->fib_metrics) - goto failure; - } else - fi->fib_metrics = (u32 *) dst_default_metrics; fib_info_cnt++; fi->fib_net = hold_net(net); @@ -787,7 +779,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) goto err_inval; if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) goto err_inval; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) goto err_inval; #endif @@ -800,7 +792,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) nh->nh_oif = cfg->fc_oif; nh->nh_gw = cfg->fc_gw; nh->nh_flags = cfg->fc_flags; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE nh->nh_tclassid = cfg->fc_flow; #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH @@ -1010,7 +1002,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, if (fi->fib_nh->nh_oif) NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (fi->fib_nh[0].nh_tclassid) NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid); #endif @@ -1035,7 +1027,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, if (nh->nh_gw) NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (nh->nh_tclassid) NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid); #endif diff --git a/trunk/net/ipv4/inetpeer.c b/trunk/net/ipv4/inetpeer.c index b6513b13d729..d9bc85751c74 100644 --- a/trunk/net/ipv4/inetpeer.c +++ b/trunk/net/ipv4/inetpeer.c @@ -475,7 +475,7 @@ static int cleanup_once(unsigned long ttl) struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) { struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; - struct inet_peer_base *base = family_to_base(daddr->family); + struct inet_peer_base *base = family_to_base(AF_INET); struct inet_peer *p; /* Look up for the address quickly, lockless. 
@@ -512,7 +512,6 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) atomic_set(&p->rid, 0); atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4)); p->tcp_ts_stamp = 0; - p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; INIT_LIST_HEAD(&p->unused); diff --git a/trunk/net/ipv4/ip_input.c b/trunk/net/ipv4/ip_input.c index d7b2b0987a3b..d859bcc26cb7 100644 --- a/trunk/net/ipv4/ip_input.c +++ b/trunk/net/ipv4/ip_input.c @@ -340,7 +340,7 @@ static int ip_rcv_finish(struct sk_buff *skb) } } -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (unlikely(skb_dst(skb)->tclassid)) { struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct); u32 idx = skb_dst(skb)->tclassid; diff --git a/trunk/net/ipv4/ipmr.c b/trunk/net/ipv4/ipmr.c index 7e41ac0b9260..3f3a9afd73e0 100644 --- a/trunk/net/ipv4/ipmr.c +++ b/trunk/net/ipv4/ipmr.c @@ -60,7 +60,6 @@ #include #include #include -#include #include #include #include @@ -1435,51 +1434,6 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) } } -#ifdef CONFIG_COMPAT -struct compat_sioc_sg_req { - struct in_addr src; - struct in_addr grp; - compat_ulong_t pktcnt; - compat_ulong_t bytecnt; - compat_ulong_t wrong_if; -}; - -int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) -{ - struct sioc_sg_req sr; - struct mfc_cache *c; - struct net *net = sock_net(sk); - struct mr_table *mrt; - - mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); - if (mrt == NULL) - return -ENOENT; - - switch (cmd) { - case SIOCGETSGCNT: - if (copy_from_user(&sr, arg, sizeof(sr))) - return -EFAULT; - - rcu_read_lock(); - c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); - if (c) { - sr.pktcnt = c->mfc_un.res.pkt; - sr.bytecnt = c->mfc_un.res.bytes; - sr.wrong_if = c->mfc_un.res.wrong_if; - rcu_read_unlock(); - - if (copy_to_user(arg, &sr, sizeof(sr))) - return -EFAULT; - return 0; - } - rcu_read_unlock(); - return -EADDRNOTAVAIL; - default: - return -ENOIOCTLCMD; - } -} -#endif - static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { diff --git a/trunk/net/ipv4/netfilter/Kconfig b/trunk/net/ipv4/netfilter/Kconfig index f926a310075d..babd1a2bae5f 100644 --- a/trunk/net/ipv4/netfilter/Kconfig +++ b/trunk/net/ipv4/netfilter/Kconfig @@ -206,9 +206,8 @@ config IP_NF_TARGET_REDIRECT config NF_NAT_SNMP_BASIC tristate "Basic SNMP-ALG support" - depends on NF_CONNTRACK_SNMP && NF_NAT + depends on NF_NAT depends on NETFILTER_ADVANCED - default NF_NAT && NF_CONNTRACK_SNMP ---help--- This module implements an Application Layer Gateway (ALG) for diff --git a/trunk/net/ipv4/netfilter/arp_tables.c b/trunk/net/ipv4/netfilter/arp_tables.c index e95054c690c6..e855fffaed95 100644 --- a/trunk/net/ipv4/netfilter/arp_tables.c +++ b/trunk/net/ipv4/netfilter/arp_tables.c @@ -866,7 +866,6 @@ static int compat_table_info(const struct xt_table_info *info, memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries[raw_smp_processor_id()]; - xt_compat_init_offsets(NFPROTO_ARP, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) @@ -1334,7 +1333,6 @@ static int translate_compat_table(const char *name, duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); - xt_compat_init_offsets(NFPROTO_ARP, number); /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, diff --git a/trunk/net/ipv4/netfilter/ip_tables.c b/trunk/net/ipv4/netfilter/ip_tables.c index ef7d7b9680ea..652efea013dc 100644 --- a/trunk/net/ipv4/netfilter/ip_tables.c +++ b/trunk/net/ipv4/netfilter/ip_tables.c @@ -1063,7 +1063,6 @@ static int compat_table_info(const struct xt_table_info *info, memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries[raw_smp_processor_id()]; - xt_compat_init_offsets(AF_INET, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) @@ -1665,7 +1664,6 @@ translate_compat_table(struct net *net, duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); - xt_compat_init_offsets(AF_INET, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, diff --git a/trunk/net/ipv4/netfilter/ipt_CLUSTERIP.c b/trunk/net/ipv4/netfilter/ipt_CLUSTERIP.c index 403ca57f6011..1e26a4897655 100644 --- a/trunk/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/trunk/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -300,8 +300,13 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) * that the ->target() function isn't called after ->destroy() */ ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL) + if (ct == NULL) { + pr_info("no conntrack!\n"); + /* FIXME: need to drop invalid ones, since replies + * to outgoing connections of other nodes will be + * marked as INVALID */ return NF_DROP; + } /* special case: ICMP error handling. conntrack distinguishes between * error messages (RELATED) and information requests (see below) */ diff --git a/trunk/net/ipv4/netfilter/ipt_LOG.c b/trunk/net/ipv4/netfilter/ipt_LOG.c index d76d6c9ed946..72ffc8fda2e9 100644 --- a/trunk/net/ipv4/netfilter/ipt_LOG.c +++ b/trunk/net/ipv4/netfilter/ipt_LOG.c @@ -442,7 +442,8 @@ ipt_log_packet(u_int8_t pf, } #endif - if (in != NULL) + /* MAC logging for input path only. */ + if (in && !out) dump_mac_header(m, loginfo, skb); dump_packet(m, loginfo, skb, 0); diff --git a/trunk/net/ipv4/netfilter/iptable_mangle.c b/trunk/net/ipv4/netfilter/iptable_mangle.c index aef5d1fbe77d..294a2a32f293 100644 --- a/trunk/net/ipv4/netfilter/iptable_mangle.c +++ b/trunk/net/ipv4/netfilter/iptable_mangle.c @@ -60,7 +60,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out) ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, dev_net(out)->ipv4.iptable_mangle); /* Reroute for ANY change. 
*/ - if (ret != NF_DROP && ret != NF_STOLEN) { + if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { iph = ip_hdr(skb); if (iph->saddr != saddr || diff --git a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 5585980fce2e..63f60fc5d26a 100644 --- a/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/trunk/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -20,7 +20,6 @@ #include #include #include -#include struct ct_iter_state { struct seq_net_private p; @@ -36,8 +35,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) for (st->bucket = 0; st->bucket < net->ct.htable_size; st->bucket++) { - n = rcu_dereference( - hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); + n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) return n; } @@ -50,14 +48,13 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; - head = rcu_dereference(hlist_nulls_next_rcu(head)); + head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { if (++st->bucket >= net->ct.htable_size) return NULL; } - head = rcu_dereference( - hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); + head = rcu_dereference(net->ct.hash[st->bucket].first); } return head; } @@ -220,8 +217,7 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq) struct hlist_node *n; for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { - n = rcu_dereference( - hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + n = rcu_dereference(net->ct.expect_hash[st->bucket].first); if (n) return n; } @@ -234,12 +230,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq, struct net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; - head = rcu_dereference(hlist_next_rcu(head)); + head = rcu_dereference(head->next); while (head == NULL) { if (++st->bucket >= nf_ct_expect_hsize) return NULL; - head = rcu_dereference( - hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + head = rcu_dereference(net->ct.expect_hash[st->bucket].first); } return head; } diff --git a/trunk/net/ipv4/netfilter/nf_nat_amanda.c b/trunk/net/ipv4/netfilter/nf_nat_amanda.c index 703f366fd235..0f23b3f06df0 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_amanda.c +++ b/trunk/net/ipv4/netfilter/nf_nat_amanda.c @@ -44,13 +44,13 @@ static unsigned int help(struct sk_buff *skb, /* Try to get same port: if not, try to change it. */ for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { - int res; + int ret; exp->tuple.dst.u.tcp.port = htons(port); - res = nf_ct_expect_related(exp); - if (res == 0) + ret = nf_ct_expect_related(exp); + if (ret == 0) break; - else if (res != -EBUSY) { + else if (ret != -EBUSY) { port = 0; break; } diff --git a/trunk/net/ipv4/netfilter/nf_nat_core.c b/trunk/net/ipv4/netfilter/nf_nat_core.c index 21bcf471b25a..c04787ce1a71 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_core.c +++ b/trunk/net/ipv4/netfilter/nf_nat_core.c @@ -221,14 +221,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, manips not an issue. 
*/ if (maniptype == IP_NAT_MANIP_SRC && !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { - /* try the original tuple first */ - if (in_range(orig_tuple, range)) { - if (!nf_nat_used_tuple(orig_tuple, ct)) { - *tuple = *orig_tuple; - return; - } - } else if (find_appropriate_src(net, zone, orig_tuple, tuple, - range)) { + if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) { pr_debug("get_unique_tuple: Found current src map\n"); if (!nf_nat_used_tuple(tuple, ct)) return; @@ -273,6 +266,7 @@ nf_nat_setup_info(struct nf_conn *ct, struct net *net = nf_ct_net(ct); struct nf_conntrack_tuple curr_tuple, new_tuple; struct nf_conn_nat *nat; + int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK); /* nat helper or nfctnetlink also setup binding */ nat = nfct_nat(ct); @@ -312,7 +306,8 @@ nf_nat_setup_info(struct nf_conn *ct, ct->status |= IPS_DST_NAT; } - if (maniptype == IP_NAT_MANIP_SRC) { + /* Place in source hash if this is the first time. */ + if (have_to_hash) { unsigned int srchash; srchash = hash_by_src(net, nf_ct_zone(ct), @@ -328,9 +323,9 @@ nf_nat_setup_info(struct nf_conn *ct, /* It's done. */ if (maniptype == IP_NAT_MANIP_DST) - ct->status |= IPS_DST_NAT_DONE; + set_bit(IPS_DST_NAT_DONE_BIT, &ct->status); else - ct->status |= IPS_SRC_NAT_DONE; + set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); return NF_ACCEPT; } @@ -507,10 +502,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto) int ret = 0; spin_lock_bh(&nf_nat_lock); - if (rcu_dereference_protected( - nf_nat_protos[proto->protonum], - lockdep_is_held(&nf_nat_lock) - ) != &nf_nat_unknown_protocol) { + if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) { ret = -EBUSY; goto out; } @@ -540,7 +532,7 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct) if (nat == NULL || nat->ct == NULL) return; - NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE); + NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK); spin_lock_bh(&nf_nat_lock); hlist_del_rcu(&nat->bysource); @@ -553,10 +545,11 @@ static void nf_nat_move_storage(void *new, void *old) struct nf_conn_nat *old_nat = old; struct nf_conn *ct = old_nat->ct; - if (!ct || !(ct->status & IPS_SRC_NAT_DONE)) + if (!ct || !(ct->status & IPS_NAT_DONE_MASK)) return; spin_lock_bh(&nf_nat_lock); + new_nat->ct = ct; hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource); spin_unlock_bh(&nf_nat_lock); } @@ -686,7 +679,8 @@ static int __net_init nf_nat_net_init(struct net *net) { /* Leave them the same for the moment. 
*/ net->ipv4.nat_htable_size = net->ct.htable_size; - net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0); + net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, + &net->ipv4.nat_vmalloced, 0); if (!net->ipv4.nat_bysource) return -ENOMEM; return 0; @@ -708,7 +702,8 @@ static void __net_exit nf_nat_net_exit(struct net *net) { nf_ct_iterate_cleanup(net, &clean_nat, NULL); synchronize_rcu(); - nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size); + nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, + net->ipv4.nat_htable_size); } static struct pernet_operations nf_nat_net_ops = { diff --git a/trunk/net/ipv4/netfilter/nf_nat_snmp_basic.c b/trunk/net/ipv4/netfilter/nf_nat_snmp_basic.c index 8812a02078ab..ee5f419d0a56 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/trunk/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -54,7 +54,6 @@ #include #include #include -#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Morris "); @@ -1311,9 +1310,9 @@ static int __init nf_nat_snmp_basic_init(void) { int ret = 0; - BUG_ON(nf_nat_snmp_hook != NULL); - rcu_assign_pointer(nf_nat_snmp_hook, help); - + ret = nf_conntrack_helper_register(&snmp_helper); + if (ret < 0) + return ret; ret = nf_conntrack_helper_register(&snmp_trap_helper); if (ret < 0) { nf_conntrack_helper_unregister(&snmp_helper); @@ -1324,7 +1323,7 @@ static int __init nf_nat_snmp_basic_init(void) static void __exit nf_nat_snmp_basic_fini(void) { - rcu_assign_pointer(nf_nat_snmp_hook, NULL); + nf_conntrack_helper_unregister(&snmp_helper); nf_conntrack_helper_unregister(&snmp_trap_helper); } diff --git a/trunk/net/ipv4/raw.c b/trunk/net/ipv4/raw.c index 6390ba299b3d..a3d5ab786e81 100644 --- a/trunk/net/ipv4/raw.c +++ b/trunk/net/ipv4/raw.c @@ -76,7 +76,6 @@ #include #include #include -#include static struct raw_hashinfo raw_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock), @@ -839,23 +838,6 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) } } -#ifdef CONFIG_COMPAT -static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - case SIOCOUTQ: - case SIOCINQ: - return -ENOIOCTLCMD; - default: -#ifdef CONFIG_IP_MROUTE - return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg)); -#else - return -ENOIOCTLCMD; -#endif - } -} -#endif - struct proto raw_prot = { .name = "RAW", .owner = THIS_MODULE, @@ -878,7 +860,6 @@ struct proto raw_prot = { #ifdef CONFIG_COMPAT .compat_setsockopt = compat_raw_setsockopt, .compat_getsockopt = compat_raw_getsockopt, - .compat_ioctl = compat_raw_ioctl, #endif }; diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index b1e5d3ac3460..351dc4e85242 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -152,41 +152,6 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, { } -static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) -{ - struct rtable *rt = (struct rtable *) dst; - struct inet_peer *peer; - u32 *p = NULL; - - if (!rt->peer) - rt_bind_peer(rt, 1); - - peer = rt->peer; - if (peer) { - u32 *old_p = __DST_METRICS_PTR(old); - unsigned long prev, new; - - p = peer->metrics; - if (inet_metrics_new(peer)) - memcpy(p, old_p, sizeof(u32) * RTAX_MAX); - - new = (unsigned long) p; - prev = cmpxchg(&dst->_metrics, old, new); - - if (prev != old) { - p = __DST_METRICS_PTR(prev); - if (prev & DST_METRICS_READ_ONLY) - p = NULL; - } else { - if (rt->fi) { - fib_info_put(rt->fi); - rt->fi = NULL; - } - } 
- } - return p; -} - static struct dst_ops ipv4_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), @@ -194,7 +159,6 @@ static struct dst_ops ipv4_dst_ops = { .check = ipv4_dst_check, .default_advmss = ipv4_default_advmss, .default_mtu = ipv4_default_mtu, - .cow_metrics = ipv4_cow_metrics, .destroy = ipv4_dst_destroy, .ifdown = ipv4_dst_ifdown, .negative_advice = ipv4_negative_advice, @@ -550,7 +514,7 @@ static const struct file_operations rt_cpu_seq_fops = { .release = seq_release, }; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE static int rt_acct_proc_show(struct seq_file *m, void *v) { struct ip_rt_acct *dst, *src; @@ -603,14 +567,14 @@ static int __net_init ip_rt_do_proc_init(struct net *net) if (!pde) goto err2; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops); if (!pde) goto err3; #endif return 0; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE err3: remove_proc_entry("rt_cache", net->proc_net_stat); #endif @@ -624,7 +588,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net) { remove_proc_entry("rt_cache", net->proc_net_stat); remove_proc_entry("rt_cache", net->proc_net); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE remove_proc_entry("rt_acct", net->proc_net); #endif } @@ -1477,8 +1441,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, if (rt->peer) atomic_inc(&rt->peer->refcnt); - if (rt->fi) - atomic_inc(&rt->fi->fib_clntref); if (arp_bind_neighbour(&rt->dst) || !(rt->dst.neighbour->nud_state & @@ -1758,10 +1720,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst) struct rtable *rt = (struct rtable *) dst; struct inet_peer *peer = rt->peer; - if (rt->fi) { - fib_info_put(rt->fi); - rt->fi = NULL; - } if (peer) { rt->peer = NULL; inet_putpeer(peer); @@ -1817,7 +1775,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt) memcpy(addr, &src, 4); } -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE static void set_class_tag(struct rtable *rt, u32 tag) { if (!(rt->dst.tclassid & 0xFFFF)) @@ -1857,30 +1815,6 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst) return mtu; } -static void rt_init_metrics(struct rtable *rt, struct fib_info *fi) -{ - if (!(rt->fl.flags & FLOWI_FLAG_PRECOW_METRICS)) { - no_cow: - if (fi->fib_metrics != (u32 *) dst_default_metrics) { - rt->fi = fi; - atomic_inc(&fi->fib_clntref); - } - dst_init_metrics(&rt->dst, fi->fib_metrics, true); - } else { - struct inet_peer *peer; - - if (!rt->peer) - rt_bind_peer(rt, 1); - peer = rt->peer; - if (!peer) - goto no_cow; - if (inet_metrics_new(peer)) - memcpy(peer->metrics, fi->fib_metrics, - sizeof(u32) * RTAX_MAX); - dst_init_metrics(&rt->dst, peer->metrics, false); - } -} - static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) { struct dst_entry *dst = &rt->dst; @@ -1890,8 +1824,8 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) rt->rt_gateway = FIB_RES_GW(*res); - rt_init_metrics(rt, fi); -#ifdef CONFIG_IP_ROUTE_CLASSID + dst_import_metrics(dst, fi->fib_metrics); +#ifdef CONFIG_NET_CLS_ROUTE dst->tclassid = FIB_RES_NH(*res).nh_tclassid; #endif } @@ -1901,7 +1835,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40) dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE 
#ifdef CONFIG_IP_MULTIPLE_TABLES set_class_tag(rt, fib_rules_tclass(res)); #endif @@ -1957,7 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->fl.mark = skb->mark; rth->fl.fl4_src = saddr; rth->rt_src = saddr; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE rth->dst.tclassid = itag; #endif rth->rt_iif = @@ -2274,7 +2208,7 @@ out: return err; rth->fl.mark = skb->mark; rth->fl.fl4_src = saddr; rth->rt_src = saddr; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE rth->dst.tclassid = itag; #endif rth->rt_iif = @@ -2818,9 +2752,6 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi rt->peer = ort->peer; if (rt->peer) atomic_inc(&rt->peer->refcnt); - rt->fi = ort->fi; - if (rt->fi) - atomic_inc(&rt->fi->fib_clntref); dst_free(new); } @@ -2897,7 +2828,7 @@ static int rt_fill_info(struct net *net, } if (rt->dst.dev) NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (rt->dst.tclassid) NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); #endif @@ -3318,9 +3249,9 @@ static __net_initdata struct pernet_operations rt_genid_ops = { }; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; -#endif /* CONFIG_IP_ROUTE_CLASSID */ +#endif /* CONFIG_NET_CLS_ROUTE */ static __initdata unsigned long rhash_entries; static int __init set_rhash_entries(char *str) @@ -3336,7 +3267,7 @@ int __init ip_rt_init(void) { int rc = 0; -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); if (!ip_rt_acct) panic("IP: failed to allocate ip_rt_acct\n"); diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c index f9867d2dbef4..6c11eece262c 100644 --- a/trunk/net/ipv4/tcp.c +++ b/trunk/net/ipv4/tcp.c @@ -2653,7 +2653,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL(compat_tcp_getsockopt); #endif -struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features) +struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct tcphdr *th; diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index eb7f82ebf4a3..2549b29b062d 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -4399,7 +4399,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; - eaten = (chunk == skb->len); + eaten = (chunk == skb->len && !th->fin); tcp_rcv_space_adjust(sk); } local_bh_disable(); diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c index 02f583b3744a..856f68466d49 100644 --- a/trunk/net/ipv4/tcp_ipv4.c +++ b/trunk/net/ipv4/tcp_ipv4.c @@ -1994,6 +1994,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) } req = req->dl_next; } + st->offset = 0; if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) break; get_req: diff --git a/trunk/net/ipv4/udp.c b/trunk/net/ipv4/udp.c index d37baaa1dbe3..8157b17959ee 100644 --- a/trunk/net/ipv4/udp.c +++ b/trunk/net/ipv4/udp.c @@ -2199,7 +2199,7 @@ int udp4_ufo_send_check(struct sk_buff *skb) return 0; } -struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features) +struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int 
mss; diff --git a/trunk/net/ipv4/xfrm4_policy.c b/trunk/net/ipv4/xfrm4_policy.c index 19fbdec6baaa..b057d40addec 100644 --- a/trunk/net/ipv4/xfrm4_policy.c +++ b/trunk/net/ipv4/xfrm4_policy.c @@ -196,11 +196,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - dst_destroy_metrics_generic(dst); - if (likely(xdst->u.rt.peer)) inet_putpeer(xdst->u.rt.peer); - xfrm_dst_destroy(xdst); } @@ -218,7 +215,6 @@ static struct dst_ops xfrm4_dst_ops = { .protocol = cpu_to_be16(ETH_P_IP), .gc = xfrm4_garbage_collect, .update_pmtu = xfrm4_update_pmtu, - .cow_metrics = dst_cow_metrics_generic, .destroy = xfrm4_dst_destroy, .ifdown = xfrm4_dst_ifdown, .local_out = __ip_local_out, diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index fd6782e3a038..24a1cf110d80 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -2661,12 +2661,14 @@ static int addrconf_ifdown(struct net_device *dev, int how) struct net *net = dev_net(dev); struct inet6_dev *idev; struct inet6_ifaddr *ifa; - int state, i; + LIST_HEAD(keep_list); + int state; ASSERT_RTNL(); - rt6_ifdown(net, dev); - neigh_ifdown(&nd_tbl, dev); + /* Flush routes if device is being removed or it is not loopback */ + if (how || !(dev->flags & IFF_LOOPBACK)) + rt6_ifdown(net, dev); idev = __in6_dev_get(dev); if (idev == NULL) @@ -2687,23 +2689,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) } - /* Step 2: clear hash table */ - for (i = 0; i < IN6_ADDR_HSIZE; i++) { - struct hlist_head *h = &inet6_addr_lst[i]; - struct hlist_node *n; - - spin_lock_bh(&addrconf_hash_lock); - restart: - hlist_for_each_entry_rcu(ifa, n, h, addr_lst) { - if (ifa->idev == idev) { - hlist_del_init_rcu(&ifa->addr_lst); - addrconf_del_timer(ifa); - goto restart; - } - } - spin_unlock_bh(&addrconf_hash_lock); - } - write_lock_bh(&idev->lock); /* Step 2: clear flags for stateless addrconf */ @@ -2737,24 +2722,53 @@ static int addrconf_ifdown(struct net_device *dev, int how) struct inet6_ifaddr, if_list); addrconf_del_timer(ifa); - list_del(&ifa->if_list); + /* If just doing link down, and address is permanent + and not link-local, then retain it. */ + if (!how && + (ifa->flags&IFA_F_PERMANENT) && + !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { + list_move_tail(&ifa->if_list, &keep_list); + + /* If not doing DAD on this address, just keep it. 
*/ + if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || + idev->cnf.accept_dad <= 0 || + (ifa->flags & IFA_F_NODAD)) + continue; - write_unlock_bh(&idev->lock); + /* If it was tentative already, no need to notify */ + if (ifa->flags & IFA_F_TENTATIVE) + continue; - spin_lock_bh(&ifa->state_lock); - state = ifa->state; - ifa->state = INET6_IFADDR_STATE_DEAD; - spin_unlock_bh(&ifa->state_lock); + /* Flag it for later restoration when link comes up */ + ifa->flags |= IFA_F_TENTATIVE; + ifa->state = INET6_IFADDR_STATE_DAD; + } else { + list_del(&ifa->if_list); + + /* clear hash table */ + spin_lock_bh(&addrconf_hash_lock); + hlist_del_init_rcu(&ifa->addr_lst); + spin_unlock_bh(&addrconf_hash_lock); + + write_unlock_bh(&idev->lock); + spin_lock_bh(&ifa->state_lock); + state = ifa->state; + ifa->state = INET6_IFADDR_STATE_DEAD; + spin_unlock_bh(&ifa->state_lock); + + if (state != INET6_IFADDR_STATE_DEAD) { + __ipv6_ifa_notify(RTM_DELADDR, ifa); + atomic_notifier_call_chain(&inet6addr_chain, + NETDEV_DOWN, ifa); + } - if (state != INET6_IFADDR_STATE_DEAD) { - __ipv6_ifa_notify(RTM_DELADDR, ifa); - atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); + in6_ifa_put(ifa); + write_lock_bh(&idev->lock); } - in6_ifa_put(ifa); - - write_lock_bh(&idev->lock); } + list_splice(&keep_list, &idev->addr_list); + write_unlock_bh(&idev->lock); /* Step 5: Discard multicast list */ @@ -4142,7 +4156,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) addrconf_leave_solict(ifp->idev, &ifp->addr); dst_hold(&ifp->rt->dst); - if (ip6_del_rt(ifp->rt)) + if (ifp->state == INET6_IFADDR_STATE_DEAD && + ip6_del_rt(ifp->rt)) dst_free(&ifp->rt->dst); break; } diff --git a/trunk/net/ipv6/af_inet6.c b/trunk/net/ipv6/af_inet6.c index 3194aa909872..978e80e2c4a8 100644 --- a/trunk/net/ipv6/af_inet6.c +++ b/trunk/net/ipv6/af_inet6.c @@ -772,7 +772,7 @@ static int ipv6_gso_send_check(struct sk_buff *skb) return err; } -static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features) +static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; diff --git a/trunk/net/ipv6/netfilter/ip6_tables.c b/trunk/net/ipv6/netfilter/ip6_tables.c index 47b7b8df7fac..7d227c644f72 100644 --- a/trunk/net/ipv6/netfilter/ip6_tables.c +++ b/trunk/net/ipv6/netfilter/ip6_tables.c @@ -1076,7 +1076,6 @@ static int compat_table_info(const struct xt_table_info *info, memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries[raw_smp_processor_id()]; - xt_compat_init_offsets(AF_INET6, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) @@ -1680,7 +1679,6 @@ translate_compat_table(struct net *net, duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); - xt_compat_init_offsets(AF_INET6, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, diff --git a/trunk/net/ipv6/netfilter/ip6t_LOG.c b/trunk/net/ipv6/netfilter/ip6t_LOG.c index 05027b753721..09c88891a753 100644 --- a/trunk/net/ipv6/netfilter/ip6t_LOG.c +++ b/trunk/net/ipv6/netfilter/ip6t_LOG.c @@ -452,7 +452,8 @@ ip6t_log_packet(u_int8_t pf, in ? in->name : "", out ? out->name : ""); - if (in != NULL) + /* MAC logging for input path only. 
*/ + if (in && !out) dump_mac_header(m, loginfo, skb); dump_packet(m, loginfo, skb, skb_network_offset(skb), 1); diff --git a/trunk/net/ipv6/netfilter/nf_conntrack_reasm.c b/trunk/net/ipv6/netfilter/nf_conntrack_reasm.c index 085727263812..79d43aa8fa8d 100644 --- a/trunk/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/trunk/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -45,7 +45,6 @@ #include #include #include -#include struct nf_ct_frag6_skb_cb @@ -74,7 +73,7 @@ static struct inet_frags nf_frags; static struct netns_frags nf_init_frags; #ifdef CONFIG_SYSCTL -static struct ctl_table nf_ct_frag6_sysctl_table[] = { +struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_timeout", .data = &nf_init_frags.timeout, diff --git a/trunk/net/ipv6/raw.c b/trunk/net/ipv6/raw.c index 2bc6cd7bb8ec..86c39526ba5e 100644 --- a/trunk/net/ipv6/raw.c +++ b/trunk/net/ipv6/raw.c @@ -123,18 +123,18 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) -typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb); +static int (*mh_filter)(struct sock *sock, struct sk_buff *skb); -static mh_filter_t __rcu *mh_filter __read_mostly; - -int rawv6_mh_filter_register(mh_filter_t filter) +int rawv6_mh_filter_register(int (*filter)(struct sock *sock, + struct sk_buff *skb)) { rcu_assign_pointer(mh_filter, filter); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_register); -int rawv6_mh_filter_unregister(mh_filter_t filter) +int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock, + struct sk_buff *skb)) { rcu_assign_pointer(mh_filter, NULL); synchronize_rcu(); @@ -192,10 +192,10 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) * policy is placed in rawv6_rcv() because it is * required for each socket. */ - mh_filter_t *filter; + int (*filter)(struct sock *sock, struct sk_buff *skb); filter = rcu_dereference(mh_filter); - filtered = filter ? (*filter)(sk, skb) : 0; + filtered = filter ? filter(sk, skb) : 0; break; } #endif diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index 72609f1c6158..373bd0416f69 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -72,6 +72,8 @@ #define RT6_TRACE(x...) 
do { ; } while (0) #endif +#define CLONE_OFFLINK_ROUTE 0 + static struct rt6_info * ip6_rt_copy(struct rt6_info *ort); static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ip6_default_advmss(const struct dst_entry *dst); @@ -97,36 +99,6 @@ static struct rt6_info *rt6_get_route_info(struct net *net, struct in6_addr *gwaddr, int ifindex); #endif -static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) -{ - struct rt6_info *rt = (struct rt6_info *) dst; - struct inet_peer *peer; - u32 *p = NULL; - - if (!rt->rt6i_peer) - rt6_bind_peer(rt, 1); - - peer = rt->rt6i_peer; - if (peer) { - u32 *old_p = __DST_METRICS_PTR(old); - unsigned long prev, new; - - p = peer->metrics; - if (inet_metrics_new(peer)) - memcpy(p, old_p, sizeof(u32) * RTAX_MAX); - - new = (unsigned long) p; - prev = cmpxchg(&dst->_metrics, old, new); - - if (prev != old) { - p = __DST_METRICS_PTR(prev); - if (prev & DST_METRICS_READ_ONLY) - p = NULL; - } - } - return p; -} - static struct dst_ops ip6_dst_ops_template = { .family = AF_INET6, .protocol = cpu_to_be16(ETH_P_IPV6), @@ -135,7 +107,6 @@ static struct dst_ops ip6_dst_ops_template = { .check = ip6_dst_check, .default_advmss = ip6_default_advmss, .default_mtu = ip6_default_mtu, - .cow_metrics = ipv6_cow_metrics, .destroy = ip6_dst_destroy, .ifdown = ip6_dst_ifdown, .negative_advice = ip6_negative_advice, @@ -156,10 +127,6 @@ static struct dst_ops ip6_dst_blackhole_ops = { .update_pmtu = ip6_rt_blackhole_update_pmtu, }; -static const u32 ip6_template_metrics[RTAX_MAX] = { - [RTAX_HOPLIMIT - 1] = 255, -}; - static struct rt6_info ip6_null_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), @@ -229,6 +196,7 @@ static void ip6_dst_destroy(struct dst_entry *dst) in6_dev_put(idev); } if (peer) { + BUG_ON(!(rt->rt6i_flags & RTF_CACHE)); rt->rt6i_peer = NULL; inet_putpeer(peer); } @@ -238,6 +206,9 @@ void rt6_bind_peer(struct rt6_info *rt, int create) { struct inet_peer *peer; + if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE))) + return; + peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create); if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL) inet_putpeer(peer); @@ -767,8 +738,13 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); - else + else { +#if CLONE_OFFLINK_ROUTE nrt = rt6_alloc_clone(rt, &fl->fl6_dst); +#else + goto out2; +#endif + } dst_release(&rt->dst); rt = nrt ? 
: net->ipv6.ip6_null_entry; @@ -2712,8 +2688,7 @@ static int __net_init ip6_route_net_init(struct net *net) net->ipv6.ip6_null_entry->dst.path = (struct dst_entry *)net->ipv6.ip6_null_entry; net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; - dst_init_metrics(&net->ipv6.ip6_null_entry->dst, - ip6_template_metrics, true); + dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255); #ifdef CONFIG_IPV6_MULTIPLE_TABLES net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, @@ -2724,8 +2699,7 @@ static int __net_init ip6_route_net_init(struct net *net) net->ipv6.ip6_prohibit_entry->dst.path = (struct dst_entry *)net->ipv6.ip6_prohibit_entry; net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops; - dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst, - ip6_template_metrics, true); + dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255); net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, sizeof(*net->ipv6.ip6_blk_hole_entry), @@ -2735,8 +2709,7 @@ static int __net_init ip6_route_net_init(struct net *net) net->ipv6.ip6_blk_hole_entry->dst.path = (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; - dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst, - ip6_template_metrics, true); + dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255); #endif net->ipv6.sysctl.flush_delay = 0; diff --git a/trunk/net/ipv6/sit.c b/trunk/net/ipv6/sit.c index b1599a345c10..8ce38f10a547 100644 --- a/trunk/net/ipv6/sit.c +++ b/trunk/net/ipv6/sit.c @@ -412,7 +412,7 @@ static void prl_list_destroy_rcu(struct rcu_head *head) p = container_of(head, struct ip_tunnel_prl_entry, rcu_head); do { - n = rcu_dereference_protected(p->next, 1); + n = p->next; kfree(p); p = n; } while (p); @@ -421,17 +421,15 @@ static void prl_list_destroy_rcu(struct rcu_head *head) static int ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) { - struct ip_tunnel_prl_entry *x; - struct ip_tunnel_prl_entry __rcu **p; + struct ip_tunnel_prl_entry *x, **p; int err = 0; ASSERT_RTNL(); if (a && a->addr != htonl(INADDR_ANY)) { - for (p = &t->prl; - (x = rtnl_dereference(*p)) != NULL; - p = &x->next) { - if (x->addr == a->addr) { + for (p = &t->prl; *p; p = &(*p)->next) { + if ((*p)->addr == a->addr) { + x = *p; *p = x->next; call_rcu(&x->rcu_head, prl_entry_destroy_rcu); t->prl_count--; @@ -440,9 +438,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) } err = -ENXIO; } else { - x = rtnl_dereference(t->prl); - if (x) { + if (t->prl) { t->prl_count = 0; + x = t->prl; call_rcu(&x->rcu_head, prl_list_destroy_rcu); t->prl = NULL; } @@ -1181,7 +1179,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) if (!dev->tstats) return -ENOMEM; dev_hold(dev); - rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); + sitn->tunnels_wc[0] = tunnel; return 0; } @@ -1198,12 +1196,11 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea for (prio = 1; prio < 4; prio++) { int h; for (h = 0; h < HASH_SIZE; h++) { - struct ip_tunnel *t; + struct ip_tunnel *t = sitn->tunnels[prio][h]; - t = rtnl_dereference(sitn->tunnels[prio][h]); while (t != NULL) { unregister_netdevice_queue(t->dev, head); - t = rtnl_dereference(t->next); + t = t->next; } } } diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c index a419a787eb69..9a009c66c8a3 100644 --- a/trunk/net/ipv6/udp.c +++ b/trunk/net/ipv6/udp.c @@ -1299,7 +1299,7 @@ static int 
udp6_ufo_send_check(struct sk_buff *skb) return 0; } -static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) +static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; diff --git a/trunk/net/ipv6/xfrm6_policy.c b/trunk/net/ipv6/xfrm6_policy.c index 834dc02f1d4f..7e74023ea6e4 100644 --- a/trunk/net/ipv6/xfrm6_policy.c +++ b/trunk/net/ipv6/xfrm6_policy.c @@ -98,10 +98,6 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, if (!xdst->u.rt6.rt6i_idev) return -ENODEV; - xdst->u.rt6.rt6i_peer = rt->rt6i_peer; - if (rt->rt6i_peer) - atomic_inc(&rt->rt6i_peer->refcnt); - /* Sheit... I remember I did this right. Apparently, * it was magically lost, so this code needs audit */ xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | @@ -220,9 +216,6 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) if (likely(xdst->u.rt6.rt6i_idev)) in6_dev_put(xdst->u.rt6.rt6i_idev); - dst_destroy_metrics_generic(dst); - if (likely(xdst->u.rt6.rt6i_peer)) - inet_putpeer(xdst->u.rt6.rt6i_peer); xfrm_dst_destroy(xdst); } @@ -258,7 +251,6 @@ static struct dst_ops xfrm6_dst_ops = { .protocol = cpu_to_be16(ETH_P_IPV6), .gc = xfrm6_garbage_collect, .update_pmtu = xfrm6_update_pmtu, - .cow_metrics = dst_cow_metrics_generic, .destroy = xfrm6_dst_destroy, .ifdown = xfrm6_dst_ifdown, .local_out = __ip6_local_out, diff --git a/trunk/net/mac80211/agg-rx.c b/trunk/net/mac80211/agg-rx.c index 0c9d0c07eae6..227ca82eef72 100644 --- a/trunk/net/mac80211/agg-rx.c +++ b/trunk/net/mac80211/agg-rx.c @@ -76,7 +76,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, #endif /* CONFIG_MAC80211_HT_DEBUG */ if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, - &sta->sta, tid, NULL, 0)) + &sta->sta, tid, NULL)) printk(KERN_DEBUG "HW problem - can not stop rx " "aggregation for tid %d\n", tid); @@ -232,9 +232,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, if (buf_size == 0) buf_size = IEEE80211_MAX_AMPDU_BUF; - /* make sure the size doesn't exceed the maximum supported by the hw */ - if (buf_size > local->hw.max_rx_aggregation_subframes) - buf_size = local->hw.max_rx_aggregation_subframes; /* examine state machine */ mutex_lock(&sta->ampdu_mlme.mtx); @@ -290,7 +287,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, } ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, - &sta->sta, tid, &start_seq_num, 0); + &sta->sta, tid, &start_seq_num); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); #endif /* CONFIG_MAC80211_HT_DEBUG */ diff --git a/trunk/net/mac80211/agg-tx.c b/trunk/net/mac80211/agg-tx.c index 63d852cb4ca2..9cc472c6a6a5 100644 --- a/trunk/net/mac80211/agg-tx.c +++ b/trunk/net/mac80211/agg-tx.c @@ -190,7 +190,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_TX_STOP, - &sta->sta, tid, NULL, 0); + &sta->sta, tid, NULL); /* HW shall not deny going back to legacy */ if (WARN_ON(ret)) { @@ -311,7 +311,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) start_seq_num = sta->tid_seq[tid] >> 4; ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, - &sta->sta, tid, &start_seq_num, 0); + &sta->sta, tid, &start_seq_num); if (ret) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "BA request denied - HW unavailable for" @@ -342,8 +342,7 @@ void 
ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, start_seq_num, - local->hw.max_tx_aggregation_subframes, - tid_tx->timeout); + 0x40, tid_tx->timeout); } int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, @@ -488,8 +487,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_TX_OPERATIONAL, - &sta->sta, tid, NULL, - sta->ampdu_mlme.tid_tx[tid]->buf_size); + &sta->sta, tid, NULL); /* * synchronize with TX path, while splicing the TX path @@ -744,11 +742,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, { struct tid_ampdu_tx *tid_tx; u16 capab, tid; - u8 buf_size; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; - buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; mutex_lock(&sta->ampdu_mlme.mtx); @@ -771,23 +767,12 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) == WLAN_STATUS_SUCCESS) { - /* - * IEEE 802.11-2007 7.3.1.14: - * In an ADDBA Response frame, when the Status Code field - * is set to 0, the Buffer Size subfield is set to a value - * of at least 1. - */ - if (!buf_size) - goto out; - if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { /* ignore duplicate response */ goto out; } - tid_tx->buf_size = buf_size; - if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) ieee80211_agg_tx_operational(local, sta, tid); diff --git a/trunk/net/mac80211/driver-ops.h b/trunk/net/mac80211/driver-ops.h index 78af32d4bc58..98d589960a49 100644 --- a/trunk/net/mac80211/driver-ops.h +++ b/trunk/net/mac80211/driver-ops.h @@ -382,17 +382,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, - u16 *ssn, u8 buf_size) + u16 *ssn) { int ret = -EOPNOTSUPP; might_sleep(); - trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size); + trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn); if (local->ops->ampdu_action) ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, - sta, tid, ssn, buf_size); + sta, tid, ssn); trace_drv_return_int(local, ret); diff --git a/trunk/net/mac80211/driver-trace.h b/trunk/net/mac80211/driver-trace.h index e5cce19a7d65..49c84218b2f4 100644 --- a/trunk/net/mac80211/driver-trace.h +++ b/trunk/net/mac80211/driver-trace.h @@ -9,11 +9,6 @@ #undef TRACE_EVENT #define TRACE_EVENT(name, proto, ...) \ static inline void trace_ ## name(proto) {} -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(...) -#undef DEFINE_EVENT -#define DEFINE_EVENT(evt_class, name, proto, ...) \ -static inline void trace_ ## name(proto) {} #endif #undef TRACE_SYSTEM @@ -43,7 +38,7 @@ static inline void trace_ ## name(proto) {} * Tracing for driver callbacks. 
*/ -DECLARE_EVENT_CLASS(local_only_evt, +TRACE_EVENT(drv_return_void, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local), TP_STRUCT__entry( @@ -55,11 +50,6 @@ DECLARE_EVENT_CLASS(local_only_evt, TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) ); -DEFINE_EVENT(local_only_evt, drv_return_void, - TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) -); - TRACE_EVENT(drv_return_int, TP_PROTO(struct ieee80211_local *local, int ret), TP_ARGS(local, ret), @@ -88,14 +78,40 @@ TRACE_EVENT(drv_return_u64, TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) ); -DEFINE_EVENT(local_only_evt, drv_start, +TRACE_EVENT(drv_start, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); -DEFINE_EVENT(local_only_evt, drv_stop, +TRACE_EVENT(drv_stop, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); TRACE_EVENT(drv_add_interface, @@ -423,14 +439,40 @@ TRACE_EVENT(drv_hw_scan, ) ); -DEFINE_EVENT(local_only_evt, drv_sw_scan_start, +TRACE_EVENT(drv_sw_scan_start, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); -DEFINE_EVENT(local_only_evt, drv_sw_scan_complete, +TRACE_EVENT(drv_sw_scan_complete, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); TRACE_EVENT(drv_get_stats, @@ -660,9 +702,23 @@ TRACE_EVENT(drv_conf_tx, ) ); -DEFINE_EVENT(local_only_evt, drv_get_tsf, +TRACE_EVENT(drv_get_tsf, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, + LOCAL_PR_ARG + ) ); TRACE_EVENT(drv_set_tsf, @@ -686,14 +742,41 @@ TRACE_EVENT(drv_set_tsf, ) ); -DEFINE_EVENT(local_only_evt, drv_reset_tsf, +TRACE_EVENT(drv_reset_tsf, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); -DEFINE_EVENT(local_only_evt, drv_tx_last_beacon, +TRACE_EVENT(drv_tx_last_beacon, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, + LOCAL_PR_ARG + ) ); TRACE_EVENT(drv_ampdu_action, @@ -701,9 +784,9 @@ TRACE_EVENT(drv_ampdu_action, struct ieee80211_sub_if_data *sdata, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, - u16 *ssn, u8 buf_size), + u16 *ssn), - TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size), + TP_ARGS(local, sdata, action, sta, tid, ssn), TP_STRUCT__entry( LOCAL_ENTRY @@ -711,7 +794,6 @@ TRACE_EVENT(drv_ampdu_action, __field(u32, action) __field(u16, tid) __field(u16, ssn) - __field(u8, buf_size) VIF_ENTRY ), @@ -722,13 +804,11 @@ TRACE_EVENT(drv_ampdu_action, __entry->action = action; __entry->tid = tid; __entry->ssn = ssn ? 
*ssn : 0; - __entry->buf_size = buf_size; ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d", - LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, - __entry->tid, __entry->buf_size + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid ) ); @@ -879,9 +959,22 @@ TRACE_EVENT(drv_remain_on_channel, ) ); -DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel, +TRACE_EVENT(drv_cancel_remain_on_channel, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); /* @@ -976,9 +1069,23 @@ TRACE_EVENT(api_stop_tx_ba_cb, ) ); -DEFINE_EVENT(local_only_evt, api_restart_hw, +TRACE_EVENT(api_restart_hw, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, + LOCAL_PR_ARG + ) ); TRACE_EVENT(api_beacon_loss, @@ -1107,14 +1214,40 @@ TRACE_EVENT(api_chswitch_done, ) ); -DEFINE_EVENT(local_only_evt, api_ready_on_channel, +TRACE_EVENT(api_ready_on_channel, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); -DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired, +TRACE_EVENT(api_remain_on_channel_expired, TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) ); /* diff --git a/trunk/net/mac80211/ibss.c b/trunk/net/mac80211/ibss.c index 775fb63471c4..53c7077ffd4f 100644 --- a/trunk/net/mac80211/ibss.c +++ b/trunk/net/mac80211/ibss.c @@ -270,8 +270,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, enum ieee80211_band band = rx_status->band; if (elems->ds_params && elems->ds_params_len == 1) - freq = ieee80211_channel_to_frequency(elems->ds_params[0], - band); + freq = ieee80211_channel_to_frequency(elems->ds_params[0]); else freq = rx_status->freq; diff --git a/trunk/net/mac80211/main.c b/trunk/net/mac80211/main.c index 09a27449f3fd..a46ff06d7cb8 100644 --- a/trunk/net/mac80211/main.c +++ b/trunk/net/mac80211/main.c @@ -554,7 +554,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->hw.queues = 1; local->hw.max_rates = 1; local->hw.max_report_rates = 0; - local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; local->user_power_level = -1; diff --git a/trunk/net/mac80211/mesh.c b/trunk/net/mac80211/mesh.c index 2a57cc02c618..ca3af4685b0a 100644 --- a/trunk/net/mac80211/mesh.c +++ b/trunk/net/mac80211/mesh.c @@ -574,7 +574,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, &elems); if (elems.ds_params && elems.ds_params_len == 1) - freq = ieee80211_channel_to_frequency(elems.ds_params[0], band); + freq = ieee80211_channel_to_frequency(elems.ds_params[0]); else freq = rx_status->freq; @@ -645,7 +645,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) mesh_mpath_table_grow(); - if 
(test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags)) + if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) mesh_mpp_table_grow(); if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index 32210695b8b6..45fbb9e33746 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -176,7 +176,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, /* check that channel matches the right operating channel */ if (local->hw.conf.channel->center_freq != - ieee80211_channel_to_frequency(hti->control_chan, sband->band)) + ieee80211_channel_to_frequency(hti->control_chan)) enable_ht = false; if (enable_ht) { @@ -429,8 +429,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, container_of((void *)bss, struct cfg80211_bss, priv); struct ieee80211_channel *new_ch; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num, - cbss->channel->band); + int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); ASSERT_MGD_MTX(ifmgd); @@ -1520,8 +1519,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, } if (elems->ds_params && elems->ds_params_len == 1) - freq = ieee80211_channel_to_frequency(elems->ds_params[0], - rx_status->band); + freq = ieee80211_channel_to_frequency(elems->ds_params[0]); else freq = rx_status->freq; @@ -1974,9 +1972,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) #ifdef CONFIG_MAC80211_VERBOSE_DEBUG wiphy_debug(local->hw.wiphy, "%s: No ack for nullfunc frame to" - " AP %pM, try %d/%i\n", + " AP %pM, try %d\n", sdata->name, bssid, - ifmgd->probe_send_count, max_tries); + ifmgd->probe_send_count); #endif ieee80211_mgd_probe_ap_send(sdata); } else { @@ -2003,10 +2001,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) #ifdef CONFIG_MAC80211_VERBOSE_DEBUG wiphy_debug(local->hw.wiphy, "%s: No probe response from AP %pM" - " after %dms, try %d/%i\n", + " after %dms, try %d\n", sdata->name, bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ, - ifmgd->probe_send_count, max_tries); + ifmgd->probe_send_count); #endif ieee80211_mgd_probe_ap_send(sdata); } else { diff --git a/trunk/net/mac80211/rx.c b/trunk/net/mac80211/rx.c index f36d70f5b062..a6701ed87f0d 100644 --- a/trunk/net/mac80211/rx.c +++ b/trunk/net/mac80211/rx.c @@ -1556,36 +1556,17 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx) { struct ieee80211_sub_if_data *sdata = rx->sdata; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; - bool check_port_control = false; - struct ethhdr *ehdr; - int ret; if (ieee80211_has_a4(hdr->frame_control) && sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) return -1; - if (sdata->vif.type == NL80211_IFTYPE_STATION && - !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { - - if (!sdata->u.mgd.use_4addr) - return -1; - else - check_port_control = true; - } - if (is_multicast_ether_addr(hdr->addr1) && - sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) + ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) || + (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) return -1; - ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); - if (ret < 0 || !check_port_control) - return ret; - - ehdr = (struct ethhdr *) rx->skb->data; - if (ehdr->h_proto != rx->sdata->control_port_protocol) - return -1; - - return 
0; + return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); } /* @@ -2711,7 +2692,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, if (!skb) { if (net_ratelimit()) wiphy_debug(local->hw.wiphy, - "failed to copy skb for %s\n", + "failed to copy multicast frame for %s\n", sdata->name); return true; } diff --git a/trunk/net/mac80211/scan.c b/trunk/net/mac80211/scan.c index 1ef73be76b25..fb274db77e3c 100644 --- a/trunk/net/mac80211/scan.c +++ b/trunk/net/mac80211/scan.c @@ -196,8 +196,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) ieee802_11_parse_elems(elements, skb->len - baselen, &elems); if (elems.ds_params && elems.ds_params_len == 1) - freq = ieee80211_channel_to_frequency(elems.ds_params[0], - rx_status->band); + freq = ieee80211_channel_to_frequency(elems.ds_params[0]); else freq = rx_status->freq; diff --git a/trunk/net/mac80211/sta_info.h b/trunk/net/mac80211/sta_info.h index ca0b69060ef7..bbdd2a86a94b 100644 --- a/trunk/net/mac80211/sta_info.h +++ b/trunk/net/mac80211/sta_info.h @@ -82,7 +82,6 @@ enum ieee80211_sta_info_flags { * @state: session state (see above) * @stop_initiator: initiator of a session stop * @tx_stop: TX DelBA frame when stopping - * @buf_size: reorder buffer size at receiver * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex. @@ -102,7 +101,6 @@ struct tid_ampdu_tx { u8 dialog_token; u8 stop_initiator; bool tx_stop; - u8 buf_size; }; /** diff --git a/trunk/net/mac80211/tx.c b/trunk/net/mac80211/tx.c index ffc67491c38f..5950e3abead9 100644 --- a/trunk/net/mac80211/tx.c +++ b/trunk/net/mac80211/tx.c @@ -1750,7 +1750,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, __le16 fc; struct ieee80211_hdr hdr; struct ieee80211s_hdr mesh_hdr __maybe_unused; - struct mesh_path __maybe_unused *mppath = NULL; + struct mesh_path *mppath = NULL; const u8 *encaps_data; int encaps_len, skip_header_bytes; int nh_pos, h_pos; @@ -1815,19 +1815,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, mppath = mpp_path_lookup(skb->data, sdata); /* - * Use address extension if it is a packet from - * another interface or if we know the destination - * is being proxied by a portal (i.e. portal address - * differs from proxied address) + * Do not use address extension, if it is a packet from + * the same interface and the destination is not being + * proxied by any other mest point. 
*/ if (compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN) == 0 && - !(mppath && compare_ether_addr(mppath->mpp, skb->data))) { + (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) { hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, skb->data, skb->data + ETH_ALEN); meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata, NULL, NULL); } else { + /* packet from other interface */ int is_mesh_mcast = 1; const u8 *mesh_da; @@ -2230,9 +2230,6 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, sdata = vif_to_sdata(vif); - if (!ieee80211_sdata_running(sdata)) - goto out; - if (tim_offset) *tim_offset = 0; if (tim_length) @@ -2302,11 +2299,6 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_mgmt *mgmt; u8 *pos; -#ifdef CONFIG_MAC80211_MESH - if (!sdata->u.mesh.mesh_id_len) - goto out; -#endif - /* headroom, head length, tail length and maximum TIM length */ skb = dev_alloc_skb(local->tx_headroom + 400 + sdata->u.mesh.vendor_ie_len); diff --git a/trunk/net/netfilter/Kconfig b/trunk/net/netfilter/Kconfig index faf7412ea453..1534f2b44caf 100644 --- a/trunk/net/netfilter/Kconfig +++ b/trunk/net/netfilter/Kconfig @@ -85,17 +85,6 @@ config NF_CONNTRACK_EVENTS If unsure, say `N'. -config NF_CONNTRACK_TIMESTAMP - bool 'Connection tracking timestamping' - depends on NETFILTER_ADVANCED - help - This option enables support for connection tracking timestamping. - This allows you to store the flow start-time and to obtain - the flow-stop time (once it has been destroyed) via Connection - tracking events. - - If unsure, say `N'. - config NF_CT_PROTO_DCCP tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)' depends on EXPERIMENTAL @@ -196,13 +185,9 @@ config NF_CONNTRACK_IRC To compile it as a module, choose M here. If unsure, say N. -config NF_CONNTRACK_BROADCAST - tristate - config NF_CONNTRACK_NETBIOS_NS tristate "NetBIOS name service protocol support" depends on NETFILTER_ADVANCED - select NF_CONNTRACK_BROADCAST help NetBIOS name service requests are sent as broadcast messages from an unprivileged port and responded to with unicast messages to the @@ -219,21 +204,6 @@ config NF_CONNTRACK_NETBIOS_NS To compile it as a module, choose M here. If unsure, say N. -config NF_CONNTRACK_SNMP - tristate "SNMP service protocol support" - depends on NETFILTER_ADVANCED - select NF_CONNTRACK_BROADCAST - help - SNMP service requests are sent as broadcast messages from an - unprivileged port and responded to with unicast messages to the - same port. This make them hard to firewall properly because connection - tracking doesn't deal with broadcasts. This helper tracks locally - originating SNMP service requests and the corresponding - responses. It relies on correct IP address configuration, specifically - netmask and broadcast address. - - To compile it as a module, choose M here. If unsure, say N. - config NF_CONNTRACK_PPTP tristate "PPtP protocol support" depends on NETFILTER_ADVANCED @@ -356,16 +326,6 @@ config NETFILTER_XT_CONNMARK comment "Xtables targets" -config NETFILTER_XT_TARGET_AUDIT - tristate "AUDIT target support" - depends on AUDIT - depends on NETFILTER_ADVANCED - ---help--- - This option adds a 'AUDIT' target, which can be used to create - audit records for packets dropped/accepted. - - To compileit as a module, choose M here. If unsure, say N. 
- config NETFILTER_XT_TARGET_CHECKSUM tristate "CHECKSUM target support" depends on IP_NF_MANGLE || IP6_NF_MANGLE @@ -517,7 +477,6 @@ config NETFILTER_XT_TARGET_NFLOG config NETFILTER_XT_TARGET_NFQUEUE tristate '"NFQUEUE" target Support' depends on NETFILTER_ADVANCED - select NETFILTER_NETLINK_QUEUE help This target replaced the old obsolete QUEUE target. @@ -927,7 +886,7 @@ config NETFILTER_XT_MATCH_RATEEST config NETFILTER_XT_MATCH_REALM tristate '"realm" match support' depends on NETFILTER_ADVANCED - select IP_ROUTE_CLASSID + select NET_CLS_ROUTE help This option adds a `realm' match, which allows you to use the realm key from the routing subsystem inside iptables. diff --git a/trunk/net/netfilter/Makefile b/trunk/net/netfilter/Makefile index 9ae6878a85b1..441050f31111 100644 --- a/trunk/net/netfilter/Makefile +++ b/trunk/net/netfilter/Makefile @@ -1,7 +1,6 @@ netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o -nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o obj-$(CONFIG_NETFILTER) = netfilter.o @@ -29,9 +28,7 @@ obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o -obj-$(CONFIG_NF_CONNTRACK_BROADCAST) += nf_conntrack_broadcast.o obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o -obj-$(CONFIG_NF_CONNTRACK_SNMP) += nf_conntrack_snmp.o obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o @@ -48,7 +45,6 @@ obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o # targets -obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o diff --git a/trunk/net/netfilter/core.c b/trunk/net/netfilter/core.c index 1e00bf7d27c5..32fcbe290c04 100644 --- a/trunk/net/netfilter/core.c +++ b/trunk/net/netfilter/core.c @@ -175,21 +175,13 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, ret = 1; } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { kfree_skb(skb); - ret = NF_DROP_GETERR(verdict); + ret = -(verdict >> NF_VERDICT_BITS); if (ret == 0) ret = -EPERM; } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { - ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn, - verdict >> NF_VERDICT_QBITS); - if (ret < 0) { - if (ret == -ECANCELED) - goto next_hook; - if (ret == -ESRCH && - (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) - goto next_hook; - kfree_skb(skb); - } - ret = 0; + if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn, + verdict >> NF_VERDICT_BITS)) + goto next_hook; } rcu_read_unlock(); return ret; @@ -222,7 +214,7 @@ EXPORT_SYMBOL(skb_make_writable); /* This does not belong here, but locally generated errors need it if connection tracking in use: without this, connection may not be in hash table, and hence manufactured ICMP or RST packets will not be associated with it. 
*/ -void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly; +void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *); EXPORT_SYMBOL(ip_ct_attach); void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) @@ -239,7 +231,7 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) } EXPORT_SYMBOL(nf_ct_attach); -void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly; +void (*nf_ct_destroy)(struct nf_conntrack *); EXPORT_SYMBOL(nf_ct_destroy); void nf_conntrack_destroy(struct nf_conntrack *nfct) diff --git a/trunk/net/netfilter/ipvs/ip_vs_app.c b/trunk/net/netfilter/ipvs/ip_vs_app.c index 5c48ffb60c28..a475edee0912 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_app.c +++ b/trunk/net/netfilter/ipvs/ip_vs_app.c @@ -43,6 +43,11 @@ EXPORT_SYMBOL(register_ip_vs_app); EXPORT_SYMBOL(unregister_ip_vs_app); EXPORT_SYMBOL(register_ip_vs_app_inc); +/* ipvs application list head */ +static LIST_HEAD(ip_vs_app_list); +static DEFINE_MUTEX(__ip_vs_app_mutex); + + /* * Get an ip_vs_app object */ @@ -62,8 +67,7 @@ static inline void ip_vs_app_put(struct ip_vs_app *app) * Allocate/initialize app incarnation and register it in proto apps. */ static int -ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto, - __u16 port) +ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port) { struct ip_vs_protocol *pp; struct ip_vs_app *inc; @@ -94,7 +98,7 @@ ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto, } } - ret = pp->register_app(net, inc); + ret = pp->register_app(inc); if (ret) goto out; @@ -115,7 +119,7 @@ ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto, * Release app incarnation */ static void -ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc) +ip_vs_app_inc_release(struct ip_vs_app *inc) { struct ip_vs_protocol *pp; @@ -123,7 +127,7 @@ ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc) return; if (pp->unregister_app) - pp->unregister_app(net, inc); + pp->unregister_app(inc); IP_VS_DBG(9, "%s App %s:%u unregistered\n", pp->name, inc->name, ntohs(inc->port)); @@ -164,17 +168,15 @@ void ip_vs_app_inc_put(struct ip_vs_app *inc) * Register an application incarnation in protocol applications */ int -register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto, - __u16 port) +register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port) { - struct netns_ipvs *ipvs = net_ipvs(net); int result; - mutex_lock(&ipvs->app_mutex); + mutex_lock(&__ip_vs_app_mutex); - result = ip_vs_app_inc_new(net, app, proto, port); + result = ip_vs_app_inc_new(app, proto, port); - mutex_unlock(&ipvs->app_mutex); + mutex_unlock(&__ip_vs_app_mutex); return result; } @@ -183,17 +185,16 @@ register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto, /* * ip_vs_app registration routine */ -int register_ip_vs_app(struct net *net, struct ip_vs_app *app) +int register_ip_vs_app(struct ip_vs_app *app) { - struct netns_ipvs *ipvs = net_ipvs(net); /* increase the module use count */ ip_vs_use_count_inc(); - mutex_lock(&ipvs->app_mutex); + mutex_lock(&__ip_vs_app_mutex); - list_add(&app->a_list, &ipvs->app_list); + list_add(&app->a_list, &ip_vs_app_list); - mutex_unlock(&ipvs->app_mutex); + mutex_unlock(&__ip_vs_app_mutex); return 0; } @@ -203,20 +204,19 @@ int register_ip_vs_app(struct net *net, struct ip_vs_app *app) * ip_vs_app unregistration routine * We are sure there are no app incarnations attached to services */ -void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app) 
+void unregister_ip_vs_app(struct ip_vs_app *app) { - struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_app *inc, *nxt; - mutex_lock(&ipvs->app_mutex); + mutex_lock(&__ip_vs_app_mutex); list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) { - ip_vs_app_inc_release(net, inc); + ip_vs_app_inc_release(inc); } list_del(&app->a_list); - mutex_unlock(&ipvs->app_mutex); + mutex_unlock(&__ip_vs_app_mutex); /* decrease the module use count */ ip_vs_use_count_dec(); @@ -226,8 +226,7 @@ void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app) /* * Bind ip_vs_conn to its ip_vs_app (called by cp constructor) */ -int ip_vs_bind_app(struct ip_vs_conn *cp, - struct ip_vs_protocol *pp) +int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp) { return pp->app_conn_bind(cp); } @@ -482,11 +481,11 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb) * /proc/net/ip_vs_app entry function */ -static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos) +static struct ip_vs_app *ip_vs_app_idx(loff_t pos) { struct ip_vs_app *app, *inc; - list_for_each_entry(app, &ipvs->app_list, a_list) { + list_for_each_entry(app, &ip_vs_app_list, a_list) { list_for_each_entry(inc, &app->incs_list, a_list) { if (pos-- == 0) return inc; @@ -498,24 +497,19 @@ static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos) static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos) { - struct net *net = seq_file_net(seq); - struct netns_ipvs *ipvs = net_ipvs(net); + mutex_lock(&__ip_vs_app_mutex); - mutex_lock(&ipvs->app_mutex); - - return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN; + return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN; } static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip_vs_app *inc, *app; struct list_head *e; - struct net *net = seq_file_net(seq); - struct netns_ipvs *ipvs = net_ipvs(net); ++*pos; if (v == SEQ_START_TOKEN) - return ip_vs_app_idx(ipvs, 0); + return ip_vs_app_idx(0); inc = v; app = inc->app; @@ -524,7 +518,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos) return list_entry(e, struct ip_vs_app, a_list); /* go on to next application */ - for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) { + for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) { app = list_entry(e, struct ip_vs_app, a_list); list_for_each_entry(inc, &app->incs_list, a_list) { return inc; @@ -535,9 +529,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void ip_vs_app_seq_stop(struct seq_file *seq, void *v) { - struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq)); - - mutex_unlock(&ipvs->app_mutex); + mutex_unlock(&__ip_vs_app_mutex); } static int ip_vs_app_seq_show(struct seq_file *seq, void *v) @@ -565,8 +557,7 @@ static const struct seq_operations ip_vs_app_seq_ops = { static int ip_vs_app_open(struct inode *inode, struct file *file) { - return seq_open_net(inode, file, &ip_vs_app_seq_ops, - sizeof(struct seq_net_private)); + return seq_open(file, &ip_vs_app_seq_ops); } static const struct file_operations ip_vs_app_fops = { @@ -578,36 +569,15 @@ static const struct file_operations ip_vs_app_fops = { }; #endif -static int __net_init __ip_vs_app_init(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - INIT_LIST_HEAD(&ipvs->app_list); - __mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key); - proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops); - return 0; -} - -static void 
__net_exit __ip_vs_app_cleanup(struct net *net) -{ - proc_net_remove(net, "ip_vs_app"); -} - -static struct pernet_operations ip_vs_app_ops = { - .init = __ip_vs_app_init, - .exit = __ip_vs_app_cleanup, -}; - int __init ip_vs_app_init(void) { - int rv; - - rv = register_pernet_subsys(&ip_vs_app_ops); - return rv; + /* we will replace it with proc_net_ipvs_create() soon */ + proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops); + return 0; } void ip_vs_app_cleanup(void) { - unregister_pernet_subsys(&ip_vs_app_ops); + proc_net_remove(&init_net, "ip_vs_app"); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_conn.c b/trunk/net/netfilter/ipvs/ip_vs_conn.c index 83233fe24a08..e9adecdc8ca4 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_conn.c +++ b/trunk/net/netfilter/ipvs/ip_vs_conn.c @@ -48,32 +48,35 @@ /* * Connection hash size. Default is what was selected at compile time. */ -static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; +int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444); MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size"); /* size and mask values */ -int ip_vs_conn_tab_size __read_mostly; -static int ip_vs_conn_tab_mask __read_mostly; +int ip_vs_conn_tab_size; +int ip_vs_conn_tab_mask; /* * Connection hash table: for input and output packets lookups of IPVS */ -static struct list_head *ip_vs_conn_tab __read_mostly; +static struct list_head *ip_vs_conn_tab; /* SLAB cache for IPVS connections */ static struct kmem_cache *ip_vs_conn_cachep __read_mostly; +/* counter for current IPVS connections */ +static atomic_t ip_vs_conn_count = ATOMIC_INIT(0); + /* counter for no client port connections */ static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0); /* random value for IPVS connection hash */ -static unsigned int ip_vs_conn_rnd __read_mostly; +static unsigned int ip_vs_conn_rnd; /* * Fine locking granularity for big connection hash table */ -#define CT_LOCKARRAY_BITS 5 +#define CT_LOCKARRAY_BITS 4 #define CT_LOCKARRAY_SIZE (1<>8)) & ip_vs_conn_tab_mask; + return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd), + (__force u32)port, proto, ip_vs_conn_rnd) + & ip_vs_conn_tab_mask; #endif - return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto, - ip_vs_conn_rnd) ^ - ((size_t)net>>8)) & ip_vs_conn_tab_mask; + return jhash_3words((__force u32)addr->ip, (__force u32)port, proto, + ip_vs_conn_rnd) + & ip_vs_conn_tab_mask; } static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p, @@ -163,18 +166,18 @@ static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p, port = p->vport; } - return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port); + return ip_vs_conn_hashkey(p->af, p->protocol, addr, port); } static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp) { struct ip_vs_conn_param p; - ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol, - &cp->caddr, cp->cport, NULL, 0, &p); + ip_vs_conn_fill_param(cp->af, cp->protocol, &cp->caddr, cp->cport, + NULL, 0, &p); - if (cp->pe) { - p.pe = cp->pe; + if (cp->dest && cp->dest->svc->pe) { + p.pe = cp->dest->svc->pe; p.pe_data = cp->pe_data; p.pe_data_len = cp->pe_data_len; } @@ -183,7 +186,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp) } /* - * Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port. + * Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port. * returns bool success. 
*/ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) @@ -266,12 +269,11 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p) list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { if (cp->af == p->af && - p->cport == cp->cport && p->vport == cp->vport && ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) && + p->cport == cp->cport && p->vport == cp->vport && ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && - p->protocol == cp->protocol && - ip_vs_conn_net_eq(cp, p->net)) { + p->protocol == cp->protocol) { /* HIT */ atomic_inc(&cp->refcnt); ct_read_unlock(hash); @@ -311,23 +313,23 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb, struct ip_vs_conn_param *p) { __be16 _ports[2], *pptr; - struct net *net = skb_net(skb); pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); if (pptr == NULL) return 1; if (likely(!inverse)) - ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr, - pptr[0], &iph->daddr, pptr[1], p); + ip_vs_conn_fill_param(af, iph->protocol, &iph->saddr, pptr[0], + &iph->daddr, pptr[1], p); else - ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr, - pptr[1], &iph->saddr, pptr[0], p); + ip_vs_conn_fill_param(af, iph->protocol, &iph->daddr, pptr[1], + &iph->saddr, pptr[0], p); return 0; } struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse) { @@ -351,10 +353,8 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) ct_read_lock(hash); list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { - if (!ip_vs_conn_net_eq(cp, p->net)) - continue; if (p->pe_data && p->pe->ct_match) { - if (p->pe == cp->pe && p->pe->ct_match(p, cp)) + if (p->pe->ct_match(p, cp)) goto out; continue; } @@ -404,11 +404,10 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { if (cp->af == p->af && - p->vport == cp->cport && p->cport == cp->dport && ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) && - p->protocol == cp->protocol && - ip_vs_conn_net_eq(cp, p->net)) { + p->vport == cp->cport && p->cport == cp->dport && + p->protocol == cp->protocol) { /* HIT */ atomic_inc(&cp->refcnt); ret = cp; @@ -429,6 +428,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse) { @@ -611,9 +611,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) struct ip_vs_dest *dest; if ((cp) && (!cp->dest)) { - dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, - cp->dport, &cp->vaddr, cp->vport, - cp->protocol, cp->fwmark); + dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport, + &cp->vaddr, cp->vport, + cp->protocol); ip_vs_bind_dest(cp, dest); return dest; } else @@ -686,14 +686,13 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp) int ip_vs_check_template(struct ip_vs_conn *ct) { struct ip_vs_dest *dest = ct->dest; - struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct)); /* * Checking the dest server status. 
*/ if ((dest == NULL) || !(dest->flags & IP_VS_DEST_F_AVAILABLE) || - (ipvs->sysctl_expire_quiescent_template && + (sysctl_ip_vs_expire_quiescent_template && (atomic_read(&dest->weight) == 0))) { IP_VS_DBG_BUF(9, "check_template: dest not available for " "protocol %s s:%s:%d v:%s:%d " @@ -731,7 +730,6 @@ int ip_vs_check_template(struct ip_vs_conn *ct) static void ip_vs_conn_expire(unsigned long data) { struct ip_vs_conn *cp = (struct ip_vs_conn *)data; - struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); cp->timeout = 60*HZ; @@ -767,14 +765,13 @@ static void ip_vs_conn_expire(unsigned long data) if (cp->flags & IP_VS_CONN_F_NFCT) ip_vs_conn_drop_conntrack(cp); - ip_vs_pe_put(cp->pe); kfree(cp->pe_data); if (unlikely(cp->app != NULL)) ip_vs_unbind_app(cp); ip_vs_unbind_dest(cp); if (cp->flags & IP_VS_CONN_F_NO_CPORT) atomic_dec(&ip_vs_conn_no_cport_cnt); - atomic_dec(&ipvs->conn_count); + atomic_dec(&ip_vs_conn_count); kmem_cache_free(ip_vs_conn_cachep, cp); return; @@ -805,12 +802,10 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp) struct ip_vs_conn * ip_vs_conn_new(const struct ip_vs_conn_param *p, const union nf_inet_addr *daddr, __be16 dport, unsigned flags, - struct ip_vs_dest *dest, __u32 fwmark) + struct ip_vs_dest *dest) { struct ip_vs_conn *cp; - struct netns_ipvs *ipvs = net_ipvs(p->net); - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net, - p->protocol); + struct ip_vs_protocol *pp = ip_vs_proto_get(p->protocol); cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC); if (cp == NULL) { @@ -820,7 +815,6 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, INIT_LIST_HEAD(&cp->c_list); setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); - ip_vs_conn_net_set(cp, p->net); cp->af = p->af; cp->protocol = p->protocol; ip_vs_addr_copy(p->af, &cp->caddr, p->caddr); @@ -832,10 +826,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, &cp->daddr, daddr); cp->dport = dport; cp->flags = flags; - cp->fwmark = fwmark; - if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) { - ip_vs_pe_get(p->pe); - cp->pe = p->pe; + if (flags & IP_VS_CONN_F_TEMPLATE && p->pe_data) { cp->pe_data = p->pe_data; cp->pe_data_len = p->pe_data_len; } @@ -851,7 +842,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, atomic_set(&cp->n_control, 0); atomic_set(&cp->in_pkts, 0); - atomic_inc(&ipvs->conn_count); + atomic_inc(&ip_vs_conn_count); if (flags & IP_VS_CONN_F_NO_CPORT) atomic_inc(&ip_vs_conn_no_cport_cnt); @@ -870,8 +861,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, #endif ip_vs_bind_xmit(cp); - if (unlikely(pd && atomic_read(&pd->appcnt))) - ip_vs_bind_app(cp, pd->pp); + if (unlikely(pp && atomic_read(&pp->appcnt))) + ip_vs_bind_app(cp, pp); /* * Allow conntrack to be preserved. By default, conntrack @@ -880,7 +871,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, * IP_VS_CONN_F_ONE_PACKET too. 
*/ - if (ip_vs_conntrack_enabled(ipvs)) + if (ip_vs_conntrack_enabled()) cp->flags |= IP_VS_CONN_F_NFCT; /* Hash it in the ip_vs_conn_tab finally */ @@ -893,22 +884,17 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, * /proc/net/ip_vs_conn entries */ #ifdef CONFIG_PROC_FS -struct ip_vs_iter_state { - struct seq_net_private p; - struct list_head *l; -}; static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) { int idx; struct ip_vs_conn *cp; - struct ip_vs_iter_state *iter = seq->private; for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { ct_read_lock_bh(idx); list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { if (pos-- == 0) { - iter->l = &ip_vs_conn_tab[idx]; + seq->private = &ip_vs_conn_tab[idx]; return cp; } } @@ -920,17 +906,14 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos) { - struct ip_vs_iter_state *iter = seq->private; - - iter->l = NULL; + seq->private = NULL; return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN; } static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip_vs_conn *cp = v; - struct ip_vs_iter_state *iter = seq->private; - struct list_head *e, *l = iter->l; + struct list_head *e, *l = seq->private; int idx; ++*pos; @@ -947,19 +930,18 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) while (++idx < ip_vs_conn_tab_size) { ct_read_lock_bh(idx); list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { - iter->l = &ip_vs_conn_tab[idx]; + seq->private = &ip_vs_conn_tab[idx]; return cp; } ct_read_unlock_bh(idx); } - iter->l = NULL; + seq->private = NULL; return NULL; } static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v) { - struct ip_vs_iter_state *iter = seq->private; - struct list_head *l = iter->l; + struct list_head *l = seq->private; if (l) ct_read_unlock_bh(l - ip_vs_conn_tab); @@ -973,19 +955,18 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) "Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n"); else { const struct ip_vs_conn *cp = v; - struct net *net = seq_file_net(seq); char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3]; size_t len = 0; - if (!ip_vs_conn_net_eq(cp, net)) - return 0; - if (cp->pe_data) { + if (cp->dest && cp->pe_data && + cp->dest->svc->pe->show_pe_data) { pe_data[0] = ' '; - len = strlen(cp->pe->name); - memcpy(pe_data + 1, cp->pe->name, len); + len = strlen(cp->dest->svc->pe->name); + memcpy(pe_data + 1, cp->dest->svc->pe->name, len); pe_data[len + 1] = ' '; len += 2; - len += cp->pe->show_pe_data(cp, pe_data + len); + len += cp->dest->svc->pe->show_pe_data(cp, + pe_data + len); } pe_data[len] = '\0'; @@ -1023,8 +1004,7 @@ static const struct seq_operations ip_vs_conn_seq_ops = { static int ip_vs_conn_open(struct inode *inode, struct file *file) { - return seq_open_net(inode, file, &ip_vs_conn_seq_ops, - sizeof(struct ip_vs_iter_state)); + return seq_open(file, &ip_vs_conn_seq_ops); } static const struct file_operations ip_vs_conn_fops = { @@ -1051,10 +1031,6 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) "Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n"); else { const struct ip_vs_conn *cp = v; - struct net *net = seq_file_net(seq); - - if (!ip_vs_conn_net_eq(cp, net)) - return 0; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) @@ -1091,8 +1067,7 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = { static int ip_vs_conn_sync_open(struct inode *inode, struct file *file) { 
- return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops, - sizeof(struct ip_vs_iter_state)); + return seq_open(file, &ip_vs_conn_sync_seq_ops); } static const struct file_operations ip_vs_conn_sync_fops = { @@ -1138,7 +1113,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp) } /* Called from keventd and must protect itself from softirqs */ -void ip_vs_random_dropentry(struct net *net) +void ip_vs_random_dropentry(void) { int idx; struct ip_vs_conn *cp; @@ -1158,8 +1133,7 @@ void ip_vs_random_dropentry(struct net *net) if (cp->flags & IP_VS_CONN_F_TEMPLATE) /* connection template */ continue; - if (!ip_vs_conn_net_eq(cp, net)) - continue; + if (cp->protocol == IPPROTO_TCP) { switch(cp->state) { case IP_VS_TCP_S_SYN_RECV: @@ -1194,13 +1168,12 @@ void ip_vs_random_dropentry(struct net *net) /* * Flush all the connection entries in the ip_vs_conn_tab */ -static void ip_vs_conn_flush(struct net *net) +static void ip_vs_conn_flush(void) { int idx; struct ip_vs_conn *cp; - struct netns_ipvs *ipvs = net_ipvs(net); -flush_again: + flush_again: for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { /* * Lock is actually needed in this loop. @@ -1208,8 +1181,7 @@ static void ip_vs_conn_flush(struct net *net) ct_write_lock_bh(idx); list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { - if (!ip_vs_conn_net_eq(cp, net)) - continue; + IP_VS_DBG(4, "del connection\n"); ip_vs_conn_expire_now(cp); if (cp->control) { @@ -1222,41 +1194,16 @@ static void ip_vs_conn_flush(struct net *net) /* the counter may be not NULL, because maybe some conn entries are run by slow timer handler or unhashed but still referred */ - if (atomic_read(&ipvs->conn_count) != 0) { + if (atomic_read(&ip_vs_conn_count) != 0) { schedule(); goto flush_again; } } -/* - * per netns init and exit - */ -int __net_init __ip_vs_conn_init(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - atomic_set(&ipvs->conn_count, 0); - - proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops); - proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops); - return 0; -} -static void __net_exit __ip_vs_conn_cleanup(struct net *net) -{ - /* flush all the connection entries first */ - ip_vs_conn_flush(net); - proc_net_remove(net, "ip_vs_conn"); - proc_net_remove(net, "ip_vs_conn_sync"); -} -static struct pernet_operations ipvs_conn_ops = { - .init = __ip_vs_conn_init, - .exit = __ip_vs_conn_cleanup, -}; int __init ip_vs_conn_init(void) { int idx; - int retc; /* Compute size and mask */ ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; @@ -1294,18 +1241,24 @@ int __init ip_vs_conn_init(void) rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); } - retc = register_pernet_subsys(&ipvs_conn_ops); + proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops); + proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops); /* calculate the random value for connection hash */ get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); - return retc; + return 0; } + void ip_vs_conn_cleanup(void) { - unregister_pernet_subsys(&ipvs_conn_ops); + /* flush all the connection entries first */ + ip_vs_conn_flush(); + /* Release the empty cache */ kmem_cache_destroy(ip_vs_conn_cachep); + proc_net_remove(&init_net, "ip_vs_conn"); + proc_net_remove(&init_net, "ip_vs_conn_sync"); vfree(ip_vs_conn_tab); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_core.c b/trunk/net/netfilter/ipvs/ip_vs_core.c index d889f4f6be99..b4e51e9c5a04 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_core.c +++ b/trunk/net/netfilter/ipvs/ip_vs_core.c @@ 
-41,7 +41,6 @@ #include /* for icmp_send */ #include #include -#include /* net_generic() */ #include #include @@ -69,12 +68,6 @@ EXPORT_SYMBOL(ip_vs_conn_put); EXPORT_SYMBOL(ip_vs_get_debug_level); #endif -int ip_vs_net_id __read_mostly; -#ifdef IP_VS_GENERIC_NETNS -EXPORT_SYMBOL(ip_vs_net_id); -#endif -/* netns cnt used for uniqueness */ -static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0); /* ID used in ICMP lookups */ #define icmp_id(icmph) (((icmph)->un).echo.id) @@ -115,28 +108,21 @@ static inline void ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_dest *dest = cp->dest; - struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); - if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { - struct ip_vs_cpu_stats *s; - - s = this_cpu_ptr(dest->stats.cpustats); - s->ustats.inpkts++; - u64_stats_update_begin(&s->syncp); - s->ustats.inbytes += skb->len; - u64_stats_update_end(&s->syncp); - - s = this_cpu_ptr(dest->svc->stats.cpustats); - s->ustats.inpkts++; - u64_stats_update_begin(&s->syncp); - s->ustats.inbytes += skb->len; - u64_stats_update_end(&s->syncp); - - s = this_cpu_ptr(ipvs->cpustats); - s->ustats.inpkts++; - u64_stats_update_begin(&s->syncp); - s->ustats.inbytes += skb->len; - u64_stats_update_end(&s->syncp); + spin_lock(&dest->stats.lock); + dest->stats.ustats.inpkts++; + dest->stats.ustats.inbytes += skb->len; + spin_unlock(&dest->stats.lock); + + spin_lock(&dest->svc->stats.lock); + dest->svc->stats.ustats.inpkts++; + dest->svc->stats.ustats.inbytes += skb->len; + spin_unlock(&dest->svc->stats.lock); + + spin_lock(&ip_vs_stats.lock); + ip_vs_stats.ustats.inpkts++; + ip_vs_stats.ustats.inbytes += skb->len; + spin_unlock(&ip_vs_stats.lock); } } @@ -145,28 +131,21 @@ static inline void ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_dest *dest = cp->dest; - struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); - if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { - struct ip_vs_cpu_stats *s; - - s = this_cpu_ptr(dest->stats.cpustats); - s->ustats.outpkts++; - u64_stats_update_begin(&s->syncp); - s->ustats.outbytes += skb->len; - u64_stats_update_end(&s->syncp); - - s = this_cpu_ptr(dest->svc->stats.cpustats); - s->ustats.outpkts++; - u64_stats_update_begin(&s->syncp); - s->ustats.outbytes += skb->len; - u64_stats_update_end(&s->syncp); - - s = this_cpu_ptr(ipvs->cpustats); - s->ustats.outpkts++; - u64_stats_update_begin(&s->syncp); - s->ustats.outbytes += skb->len; - u64_stats_update_end(&s->syncp); + spin_lock(&dest->stats.lock); + dest->stats.ustats.outpkts++; + dest->stats.ustats.outbytes += skb->len; + spin_unlock(&dest->stats.lock); + + spin_lock(&dest->svc->stats.lock); + dest->svc->stats.ustats.outpkts++; + dest->svc->stats.ustats.outbytes += skb->len; + spin_unlock(&dest->svc->stats.lock); + + spin_lock(&ip_vs_stats.lock); + ip_vs_stats.ustats.outpkts++; + ip_vs_stats.ustats.outbytes += skb->len; + spin_unlock(&ip_vs_stats.lock); } } @@ -174,44 +153,41 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) static inline void ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) { - struct netns_ipvs *ipvs = net_ipvs(svc->net); - struct ip_vs_cpu_stats *s; - - s = this_cpu_ptr(cp->dest->stats.cpustats); - s->ustats.conns++; + spin_lock(&cp->dest->stats.lock); + cp->dest->stats.ustats.conns++; + spin_unlock(&cp->dest->stats.lock); - s = this_cpu_ptr(svc->stats.cpustats); - s->ustats.conns++; + spin_lock(&svc->stats.lock); + svc->stats.ustats.conns++; + spin_unlock(&svc->stats.lock); - s = 
this_cpu_ptr(ipvs->cpustats); - s->ustats.conns++; + spin_lock(&ip_vs_stats.lock); + ip_vs_stats.ustats.conns++; + spin_unlock(&ip_vs_stats.lock); } static inline int ip_vs_set_state(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, - struct ip_vs_proto_data *pd) + struct ip_vs_protocol *pp) { - if (unlikely(!pd->pp->state_transition)) + if (unlikely(!pp->state_transition)) return 0; - return pd->pp->state_transition(cp, direction, skb, pd); + return pp->state_transition(cp, direction, skb, pp); } -static inline int +static inline void ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc, struct sk_buff *skb, int protocol, const union nf_inet_addr *caddr, __be16 cport, const union nf_inet_addr *vaddr, __be16 vport, struct ip_vs_conn_param *p) { - ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr, - vport, p); + ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p); p->pe = svc->pe; if (p->pe && p->pe->fill_param) - return p->pe->fill_param(p, skb); - - return 0; + p->pe->fill_param(p, skb); } /* @@ -224,7 +200,7 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc, static struct ip_vs_conn * ip_vs_sched_persist(struct ip_vs_service *svc, struct sk_buff *skb, - __be16 src_port, __be16 dst_port, int *ignored) + __be16 ports[2]) { struct ip_vs_conn *cp = NULL; struct ip_vs_iphdr iph; @@ -248,8 +224,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc, IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " "mnet %s\n", - IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port), - IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port), + IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]), + IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]), IP_VS_DBG_ADDR(svc->af, &snet)); /* @@ -271,14 +247,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc, const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) }; __be16 vport = 0; - if (dst_port == svc->port) { + if (ports[1] == svc->port) { /* non-FTP template: * * FTP template: * */ if (svc->port != FTPPORT) - vport = dst_port; + vport = ports[1]; } else { /* Note: persistent fwmark-based services and * persistent port zero service are handled here. @@ -292,31 +268,24 @@ ip_vs_sched_persist(struct ip_vs_service *svc, vaddr = &fwmark; } } - /* return *ignored = -1 so NF_DROP can be used */ - if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0, - vaddr, vport, ¶m) < 0) { - *ignored = -1; - return NULL; - } + ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0, + vaddr, vport, ¶m); } /* Check if a template already exists */ ct = ip_vs_ct_in_get(¶m); if (!ct || !ip_vs_check_template(ct)) { - /* - * No template found or the dest of the connection + /* No template found or the dest of the connection * template is not available. - * return *ignored=0 i.e. 
ICMP and NF_DROP */ dest = svc->scheduler->schedule(svc, skb); if (!dest) { IP_VS_DBG(1, "p-schedule: no dest found.\n"); kfree(param.pe_data); - *ignored = 0; return NULL; } - if (dst_port == svc->port && svc->port != FTPPORT) + if (ports[1] == svc->port && svc->port != FTPPORT) dport = dest->port; /* Create a template @@ -324,10 +293,9 @@ ip_vs_sched_persist(struct ip_vs_service *svc, * and thus param.pe_data will be destroyed * when the template expires */ ct = ip_vs_conn_new(¶m, &dest->addr, dport, - IP_VS_CONN_F_TEMPLATE, dest, skb->mark); + IP_VS_CONN_F_TEMPLATE, dest); if (ct == NULL) { kfree(param.pe_data); - *ignored = -1; return NULL; } @@ -338,7 +306,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, kfree(param.pe_data); } - dport = dst_port; + dport = ports[1]; if (dport == svc->port && dest->port) dport = dest->port; @@ -349,13 +317,11 @@ ip_vs_sched_persist(struct ip_vs_service *svc, /* * Create a new connection according to the template */ - ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr, - src_port, &iph.daddr, dst_port, ¶m); - - cp = ip_vs_conn_new(¶m, &dest->addr, dport, flags, dest, skb->mark); + ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0], + &iph.daddr, ports[1], ¶m); + cp = ip_vs_conn_new(¶m, &dest->addr, dport, flags, dest); if (cp == NULL) { ip_vs_conn_put(ct); - *ignored = -1; return NULL; } @@ -375,27 +341,11 @@ ip_vs_sched_persist(struct ip_vs_service *svc, * It selects a server according to the virtual service, and * creates a connection entry. * Protocols supported: TCP, UDP - * - * Usage of *ignored - * - * 1 : protocol tried to schedule (eg. on SYN), found svc but the - * svc/scheduler decides that this packet should be accepted with - * NF_ACCEPT because it must not be scheduled. - * - * 0 : scheduler can not find destination, so try bypass or - * return ICMP and then NF_DROP (ip_vs_leave). - * - * -1 : scheduler tried to schedule but fatal error occurred, eg. - * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param - * failure such as missing Call-ID, ENOMEM on skb_linearize - * or pe_data. In this case we should return NF_DROP without - * any attempts to send ICMP with ip_vs_leave. */ struct ip_vs_conn * ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, - struct ip_vs_proto_data *pd, int *ignored) + struct ip_vs_protocol *pp, int *ignored) { - struct ip_vs_protocol *pp = pd->pp; struct ip_vs_conn *cp = NULL; struct ip_vs_iphdr iph; struct ip_vs_dest *dest; @@ -421,10 +371,12 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, } /* - * Do not schedule replies from local real server. + * Do not schedule replies from local real server. It is risky + * for fwmark services but mostly for persistent services. 
*/ if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) && - (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) { + (svc->flags & IP_VS_SVC_F_PERSISTENT || svc->fwmark) && + (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) { IP_VS_DBG_PKT(12, svc->af, pp, skb, 0, "Not scheduling reply for existing connection"); __ip_vs_conn_put(cp); @@ -434,10 +386,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, /* * Persistent service */ - if (svc->flags & IP_VS_SVC_F_PERSISTENT) - return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored); - - *ignored = 0; + if (svc->flags & IP_VS_SVC_F_PERSISTENT) { + *ignored = 0; + return ip_vs_sched_persist(svc, skb, pptr); + } /* * Non-persistent service @@ -450,6 +402,8 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, return NULL; } + *ignored = 0; + dest = svc->scheduler->schedule(svc, skb); if (dest == NULL) { IP_VS_DBG(1, "Schedule: no dest found.\n"); @@ -465,17 +419,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, */ { struct ip_vs_conn_param p; - - ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, - &iph.saddr, pptr[0], &iph.daddr, pptr[1], - &p); + ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, + pptr[0], &iph.daddr, pptr[1], &p); cp = ip_vs_conn_new(&p, &dest->addr, dest->port ? dest->port : pptr[1], - flags, dest, skb->mark); - if (!cp) { - *ignored = -1; + flags, dest); + if (!cp) return NULL; - } } IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u " @@ -497,14 +447,11 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, * no destination is available for a new connection. */ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, - struct ip_vs_proto_data *pd) + struct ip_vs_protocol *pp) { - struct net *net; - struct netns_ipvs *ipvs; __be16 _ports[2], *pptr; struct ip_vs_iphdr iph; int unicast; - ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); @@ -512,20 +459,18 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, ip_vs_service_put(svc); return NF_DROP; } - net = skb_net(skb); #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST; else #endif - unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST); + unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST); /* if it is fwmark-based service, the cache_bypass sysctl is up and the destination is a non-local unicast, then create a cache_bypass connection entry */ - ipvs = net_ipvs(net); - if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) { + if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) { int ret, cs; struct ip_vs_conn *cp; unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && @@ -539,12 +484,12 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); { struct ip_vs_conn_param p; - ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, + ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, pptr[0], &iph.daddr, pptr[1], &p); cp = ip_vs_conn_new(&p, &daddr, 0, IP_VS_CONN_F_BYPASS | flags, - NULL, skb->mark); + NULL); if (!cp) return NF_DROP; } @@ -553,10 +498,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, ip_vs_in_stats(cp, skb); /* set state */ - cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); + cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp); /* transmit the first SYN packet */ - ret = cp->packet_xmit(skb, 
cp, pd->pp); + ret = cp->packet_xmit(skb, cp, pp); /* do not touch skb anymore */ atomic_inc(&cp->in_pkts); @@ -737,7 +682,6 @@ static int handle_response_icmp(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, unsigned int offset, unsigned int ihl) { - struct netns_ipvs *ipvs; unsigned int verdict = NF_DROP; if (IP_VS_FWD_METHOD(cp) != 0) { @@ -759,8 +703,6 @@ static int handle_response_icmp(int af, struct sk_buff *skb, if (!skb_make_writable(skb, offset)) goto out; - ipvs = net_ipvs(skb_net(skb)); - #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) ip_vs_nat_icmp_v6(skb, pp, cp, 1); @@ -770,11 +712,11 @@ static int handle_response_icmp(int af, struct sk_buff *skb, #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { - if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0) + if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0) goto out; } else #endif - if ((ipvs->sysctl_snat_reroute || + if ((sysctl_ip_vs_snat_reroute || skb_rtable(skb)->rt_flags & RTCF_LOCAL) && ip_route_me_harder(skb, RTN_LOCAL) != 0) goto out; @@ -866,7 +808,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related, ip_vs_fill_iphdr(AF_INET, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ - cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1); + cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); if (!cp) return NF_ACCEPT; @@ -943,7 +885,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, ip_vs_fill_iphdr(AF_INET6, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ - cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1); + cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); if (!cp) return NF_ACCEPT; @@ -982,12 +924,9 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) * Used for NAT and local client. */ static unsigned int -handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, +handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int ihl) { - struct ip_vs_protocol *pp = pd->pp; - struct netns_ipvs *ipvs; - IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet"); if (!skb_make_writable(skb, ihl)) @@ -1022,15 +961,13 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, * if it came from this machine itself. So re-compute * the routing information. 
*/ - ipvs = net_ipvs(skb_net(skb)); - #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { - if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0) + if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0) goto drop; } else #endif - if ((ipvs->sysctl_snat_reroute || + if ((sysctl_ip_vs_snat_reroute || skb_rtable(skb)->rt_flags & RTCF_LOCAL) && ip_route_me_harder(skb, RTN_LOCAL) != 0) goto drop; @@ -1038,7 +975,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); ip_vs_out_stats(cp, skb); - ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd); + ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); skb->ipvs_property = 1; if (!(cp->flags & IP_VS_CONN_F_NFCT)) ip_vs_notrack(skb); @@ -1062,12 +999,9 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, static unsigned int ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) { - struct net *net = NULL; struct ip_vs_iphdr iph; struct ip_vs_protocol *pp; - struct ip_vs_proto_data *pd; struct ip_vs_conn *cp; - struct netns_ipvs *ipvs; EnterFunction(11); @@ -1088,7 +1022,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) if (unlikely(!skb_dst(skb))) return NF_ACCEPT; - net = skb_net(skb); ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { @@ -1112,10 +1045,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } - pd = ip_vs_proto_data_get(net, iph.protocol); - if (unlikely(!pd)) + pp = ip_vs_proto_get(iph.protocol); + if (unlikely(!pp)) return NF_ACCEPT; - pp = pd->pp; /* reassemble IP fragments */ #ifdef CONFIG_IP_VS_IPV6 @@ -1141,12 +1073,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) /* * Check if the packet belongs to an existing entry */ - cp = pp->conn_out_get(af, skb, &iph, iph.len, 0); - ipvs = net_ipvs(net); + cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); if (likely(cp)) - return handle_response(af, skb, pd, cp, iph.len); - if (ipvs->sysctl_nat_icmp_send && + return handle_response(af, skb, pp, cp, iph.len); + if (sysctl_ip_vs_nat_icmp_send && (pp->protocol == IPPROTO_TCP || pp->protocol == IPPROTO_UDP || pp->protocol == IPPROTO_SCTP)) { @@ -1156,7 +1087,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) sizeof(_ports), _ports); if (pptr == NULL) return NF_ACCEPT; /* Not for me */ - if (ip_vs_lookup_real_service(net, af, iph.protocol, + if (ip_vs_lookup_real_service(af, iph.protocol, &iph.saddr, pptr[0])) { /* @@ -1271,14 +1202,12 @@ ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb, static int ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) { - struct net *net = NULL; struct iphdr *iph; struct icmphdr _icmph, *ic; struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ struct ip_vs_iphdr ciph; struct ip_vs_conn *cp; struct ip_vs_protocol *pp; - struct ip_vs_proto_data *pd; unsigned int offset, ihl, verdict; union nf_inet_addr snet; @@ -1320,11 +1249,9 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ - net = skb_net(skb); - pd = ip_vs_proto_data_get(net, cih->protocol); - if (!pd) + pp = ip_vs_proto_get(cih->protocol); + if (!pp) return NF_ACCEPT; - pp = pd->pp; /* Is the embedded protocol header present? 
*/ if (unlikely(cih->frag_off & htons(IP_OFFSET) && @@ -1338,10 +1265,10 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) ip_vs_fill_iphdr(AF_INET, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ - cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1); + cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1); if (!cp) { /* The packet could also belong to a local client */ - cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1); + cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); if (cp) { snet.ip = iph->saddr; return handle_response_icmp(AF_INET, skb, &snet, @@ -1385,7 +1312,6 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) { - struct net *net = NULL; struct ipv6hdr *iph; struct icmp6hdr _icmph, *ic; struct ipv6hdr _ciph, *cih; /* The ip header contained @@ -1393,7 +1319,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) struct ip_vs_iphdr ciph; struct ip_vs_conn *cp; struct ip_vs_protocol *pp; - struct ip_vs_proto_data *pd; unsigned int offset, verdict; union nf_inet_addr snet; struct rt6_info *rt; @@ -1436,11 +1361,9 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ - net = skb_net(skb); - pd = ip_vs_proto_data_get(net, cih->nexthdr); - if (!pd) + pp = ip_vs_proto_get(cih->nexthdr); + if (!pp) return NF_ACCEPT; - pp = pd->pp; /* Is the embedded protocol header present? */ /* TODO: we don't support fragmentation at the moment anyways */ @@ -1454,10 +1377,10 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) ip_vs_fill_iphdr(AF_INET6, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ - cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1); + cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1); if (!cp) { /* The packet could also belong to a local client */ - cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1); + cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); if (cp) { ipv6_addr_copy(&snet.in6, &iph->saddr); return handle_response_icmp(AF_INET6, skb, &snet, @@ -1500,13 +1423,10 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) static unsigned int ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) { - struct net *net; struct ip_vs_iphdr iph; struct ip_vs_protocol *pp; - struct ip_vs_proto_data *pd; struct ip_vs_conn *cp; int ret, restart, pkts; - struct netns_ipvs *ipvs; /* Already marked as IPVS request or reply? */ if (skb->ipvs_property) @@ -1560,21 +1480,20 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } - net = skb_net(skb); /* Protocol supported? 
*/ - pd = ip_vs_proto_data_get(net, iph.protocol); - if (unlikely(!pd)) + pp = ip_vs_proto_get(iph.protocol); + if (unlikely(!pp)) return NF_ACCEPT; - pp = pd->pp; + /* * Check if the packet belongs to an existing connection entry */ - cp = pp->conn_in_get(af, skb, &iph, iph.len, 0); + cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0); if (unlikely(!cp)) { int v; - if (!pp->conn_schedule(af, skb, pd, &v, &cp)) + if (!pp->conn_schedule(af, skb, pp, &v, &cp)) return v; } @@ -1586,13 +1505,12 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) } IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); - net = skb_net(skb); - ipvs = net_ipvs(net); + /* Check the server status */ if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ - if (ipvs->sysctl_expire_nodest_conn) { + if (sysctl_ip_vs_expire_nodest_conn) { /* try to expire the connection immediately */ ip_vs_conn_expire_now(cp); } @@ -1603,7 +1521,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) } ip_vs_in_stats(cp, skb); - restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); + restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp); if (cp->packet_xmit) ret = cp->packet_xmit(skb, cp, pp); /* do not touch skb anymore */ @@ -1617,41 +1535,35 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) * * Sync connection if it is about to close to * encorage the standby servers to update the connections timeout - * - * For ONE_PKT let ip_vs_sync_conn() do the filter work. */ - - if (cp->flags & IP_VS_CONN_F_ONE_PACKET) - pkts = ipvs->sysctl_sync_threshold[0]; - else - pkts = atomic_add_return(1, &cp->in_pkts); - - if ((ipvs->sync_state & IP_VS_STATE_MASTER) && + pkts = atomic_add_return(1, &cp->in_pkts); + if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) && cp->protocol == IPPROTO_SCTP) { if ((cp->state == IP_VS_SCTP_S_ESTABLISHED && - (pkts % ipvs->sysctl_sync_threshold[1] - == ipvs->sysctl_sync_threshold[0])) || + (pkts % sysctl_ip_vs_sync_threshold[1] + == sysctl_ip_vs_sync_threshold[0])) || (cp->old_state != cp->state && ((cp->state == IP_VS_SCTP_S_CLOSED) || (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) || (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) { - ip_vs_sync_conn(net, cp); + ip_vs_sync_conn(cp); goto out; } } /* Keep this block last: TCP and others with pp->num_states <= 1 */ - else if ((ipvs->sync_state & IP_VS_STATE_MASTER) && + else if (af == AF_INET && + (ip_vs_sync_state & IP_VS_STATE_MASTER) && (((cp->protocol != IPPROTO_TCP || cp->state == IP_VS_TCP_S_ESTABLISHED) && - (pkts % ipvs->sysctl_sync_threshold[1] - == ipvs->sysctl_sync_threshold[0])) || + (pkts % sysctl_ip_vs_sync_threshold[1] + == sysctl_ip_vs_sync_threshold[0])) || ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && ((cp->state == IP_VS_TCP_S_FIN_WAIT) || (cp->state == IP_VS_TCP_S_CLOSE) || (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || (cp->state == IP_VS_TCP_S_TIME_WAIT))))) - ip_vs_sync_conn(net, cp); + ip_vs_sync_conn(cp); out: cp->old_state = cp->state; @@ -1870,39 +1782,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { }, #endif }; -/* - * Initialize IP Virtual Server netns mem. 
- */ -static int __net_init __ip_vs_init(struct net *net) -{ - struct netns_ipvs *ipvs; - - ipvs = net_generic(net, ip_vs_net_id); - if (ipvs == NULL) { - pr_err("%s(): no memory.\n", __func__); - return -ENOMEM; - } - ipvs->net = net; - /* Counters used for creating unique names */ - ipvs->gen = atomic_read(&ipvs_netns_cnt); - atomic_inc(&ipvs_netns_cnt); - net->ipvs = ipvs; - printk(KERN_INFO "IPVS: Creating netns size=%lu id=%d\n", - sizeof(struct netns_ipvs), ipvs->gen); - return 0; -} -static void __net_exit __ip_vs_cleanup(struct net *net) -{ - IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen); -} - -static struct pernet_operations ipvs_core_ops = { - .init = __ip_vs_init, - .exit = __ip_vs_cleanup, - .id = &ip_vs_net_id, - .size = sizeof(struct netns_ipvs), -}; /* * Initialize IP Virtual Server @@ -1911,11 +1791,8 @@ static int __init ip_vs_init(void) { int ret; - ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ - if (ret < 0) - return ret; - ip_vs_estimator_init(); + ret = ip_vs_control_init(); if (ret < 0) { pr_err("can't setup control.\n"); @@ -1936,23 +1813,15 @@ static int __init ip_vs_init(void) goto cleanup_app; } - ret = ip_vs_sync_init(); - if (ret < 0) { - pr_err("can't setup sync data.\n"); - goto cleanup_conn; - } - ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); if (ret < 0) { pr_err("can't register hooks.\n"); - goto cleanup_sync; + goto cleanup_conn; } pr_info("ipvs loaded.\n"); return ret; -cleanup_sync: - ip_vs_sync_cleanup(); cleanup_conn: ip_vs_conn_cleanup(); cleanup_app: @@ -1962,20 +1831,17 @@ static int __init ip_vs_init(void) ip_vs_control_cleanup(); cleanup_estimator: ip_vs_estimator_cleanup(); - unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ return ret; } static void __exit ip_vs_cleanup(void) { nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); - ip_vs_sync_cleanup(); ip_vs_conn_cleanup(); ip_vs_app_cleanup(); ip_vs_protocol_cleanup(); ip_vs_control_cleanup(); ip_vs_estimator_cleanup(); - unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ pr_info("ipvs unloaded.\n"); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_ctl.c b/trunk/net/netfilter/ipvs/ip_vs_ctl.c index 98df59a12453..22f7ad5101ab 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_ctl.c +++ b/trunk/net/netfilter/ipvs/ip_vs_ctl.c @@ -38,7 +38,6 @@ #include #include -#include #include #ifdef CONFIG_IP_VS_IPV6 #include @@ -58,7 +57,42 @@ static DEFINE_MUTEX(__ip_vs_mutex); /* lock for service table */ static DEFINE_RWLOCK(__ip_vs_svc_lock); +/* lock for table with the real services */ +static DEFINE_RWLOCK(__ip_vs_rs_lock); + +/* lock for state and timeout tables */ +static DEFINE_SPINLOCK(ip_vs_securetcp_lock); + +/* lock for drop entry handling */ +static DEFINE_SPINLOCK(__ip_vs_dropentry_lock); + +/* lock for drop packet handling */ +static DEFINE_SPINLOCK(__ip_vs_droppacket_lock); + +/* 1/rate drop and drop-entry variables */ +int ip_vs_drop_rate = 0; +int ip_vs_drop_counter = 0; +static atomic_t ip_vs_dropentry = ATOMIC_INIT(0); + +/* number of virtual services */ +static int ip_vs_num_services = 0; + /* sysctl variables */ +static int sysctl_ip_vs_drop_entry = 0; +static int sysctl_ip_vs_drop_packet = 0; +static int sysctl_ip_vs_secure_tcp = 0; +static int sysctl_ip_vs_amemthresh = 1024; +static int sysctl_ip_vs_am_droprate = 10; +int sysctl_ip_vs_cache_bypass = 0; +int sysctl_ip_vs_expire_nodest_conn = 0; +int sysctl_ip_vs_expire_quiescent_template = 0; +int sysctl_ip_vs_sync_threshold[2] = { 3, 50 }; +int 
sysctl_ip_vs_nat_icmp_send = 0; +#ifdef CONFIG_IP_VS_NFCT +int sysctl_ip_vs_conntrack; +#endif +int sysctl_ip_vs_snat_reroute = 1; + #ifdef CONFIG_IP_VS_DEBUG static int sysctl_ip_vs_debug_level = 0; @@ -71,8 +105,7 @@ int ip_vs_get_debug_level(void) #ifdef CONFIG_IP_VS_IPV6 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ -static int __ip_vs_addr_is_local_v6(struct net *net, - const struct in6_addr *addr) +static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr) { struct rt6_info *rt; struct flowi fl = { @@ -81,7 +114,7 @@ static int __ip_vs_addr_is_local_v6(struct net *net, .fl6_src = { .s6_addr32 = {0, 0, 0, 0} }, }; - rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl); + rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK)) return 1; @@ -92,7 +125,7 @@ static int __ip_vs_addr_is_local_v6(struct net *net, * update_defense_level is called from keventd and from sysctl, * so it needs to protect itself from softirqs */ -static void update_defense_level(struct netns_ipvs *ipvs) +static void update_defense_level(void) { struct sysinfo i; static int old_secure_tcp = 0; @@ -108,73 +141,73 @@ static void update_defense_level(struct netns_ipvs *ipvs) /* si_swapinfo(&i); */ /* availmem = availmem - (i.totalswap - i.freeswap); */ - nomem = (availmem < ipvs->sysctl_amemthresh); + nomem = (availmem < sysctl_ip_vs_amemthresh); local_bh_disable(); /* drop_entry */ - spin_lock(&ipvs->dropentry_lock); - switch (ipvs->sysctl_drop_entry) { + spin_lock(&__ip_vs_dropentry_lock); + switch (sysctl_ip_vs_drop_entry) { case 0: - atomic_set(&ipvs->dropentry, 0); + atomic_set(&ip_vs_dropentry, 0); break; case 1: if (nomem) { - atomic_set(&ipvs->dropentry, 1); - ipvs->sysctl_drop_entry = 2; + atomic_set(&ip_vs_dropentry, 1); + sysctl_ip_vs_drop_entry = 2; } else { - atomic_set(&ipvs->dropentry, 0); + atomic_set(&ip_vs_dropentry, 0); } break; case 2: if (nomem) { - atomic_set(&ipvs->dropentry, 1); + atomic_set(&ip_vs_dropentry, 1); } else { - atomic_set(&ipvs->dropentry, 0); - ipvs->sysctl_drop_entry = 1; + atomic_set(&ip_vs_dropentry, 0); + sysctl_ip_vs_drop_entry = 1; }; break; case 3: - atomic_set(&ipvs->dropentry, 1); + atomic_set(&ip_vs_dropentry, 1); break; } - spin_unlock(&ipvs->dropentry_lock); + spin_unlock(&__ip_vs_dropentry_lock); /* drop_packet */ - spin_lock(&ipvs->droppacket_lock); - switch (ipvs->sysctl_drop_packet) { + spin_lock(&__ip_vs_droppacket_lock); + switch (sysctl_ip_vs_drop_packet) { case 0: - ipvs->drop_rate = 0; + ip_vs_drop_rate = 0; break; case 1: if (nomem) { - ipvs->drop_rate = ipvs->drop_counter - = ipvs->sysctl_amemthresh / - (ipvs->sysctl_amemthresh-availmem); - ipvs->sysctl_drop_packet = 2; + ip_vs_drop_rate = ip_vs_drop_counter + = sysctl_ip_vs_amemthresh / + (sysctl_ip_vs_amemthresh-availmem); + sysctl_ip_vs_drop_packet = 2; } else { - ipvs->drop_rate = 0; + ip_vs_drop_rate = 0; } break; case 2: if (nomem) { - ipvs->drop_rate = ipvs->drop_counter - = ipvs->sysctl_amemthresh / - (ipvs->sysctl_amemthresh-availmem); + ip_vs_drop_rate = ip_vs_drop_counter + = sysctl_ip_vs_amemthresh / + (sysctl_ip_vs_amemthresh-availmem); } else { - ipvs->drop_rate = 0; - ipvs->sysctl_drop_packet = 1; + ip_vs_drop_rate = 0; + sysctl_ip_vs_drop_packet = 1; } break; case 3: - ipvs->drop_rate = ipvs->sysctl_am_droprate; + ip_vs_drop_rate = sysctl_ip_vs_am_droprate; break; } - spin_unlock(&ipvs->droppacket_lock); + spin_unlock(&__ip_vs_droppacket_lock); /* secure_tcp */ - 
spin_lock(&ipvs->securetcp_lock); - switch (ipvs->sysctl_secure_tcp) { + spin_lock(&ip_vs_securetcp_lock); + switch (sysctl_ip_vs_secure_tcp) { case 0: if (old_secure_tcp >= 2) to_change = 0; @@ -183,7 +216,7 @@ static void update_defense_level(struct netns_ipvs *ipvs) if (nomem) { if (old_secure_tcp < 2) to_change = 1; - ipvs->sysctl_secure_tcp = 2; + sysctl_ip_vs_secure_tcp = 2; } else { if (old_secure_tcp >= 2) to_change = 0; @@ -196,7 +229,7 @@ static void update_defense_level(struct netns_ipvs *ipvs) } else { if (old_secure_tcp >= 2) to_change = 0; - ipvs->sysctl_secure_tcp = 1; + sysctl_ip_vs_secure_tcp = 1; } break; case 3: @@ -204,11 +237,10 @@ static void update_defense_level(struct netns_ipvs *ipvs) to_change = 1; break; } - old_secure_tcp = ipvs->sysctl_secure_tcp; + old_secure_tcp = sysctl_ip_vs_secure_tcp; if (to_change >= 0) - ip_vs_protocol_timeout_change(ipvs, - ipvs->sysctl_secure_tcp > 1); - spin_unlock(&ipvs->securetcp_lock); + ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1); + spin_unlock(&ip_vs_securetcp_lock); local_bh_enable(); } @@ -218,16 +250,16 @@ static void update_defense_level(struct netns_ipvs *ipvs) * Timer for checking the defense */ #define DEFENSE_TIMER_PERIOD 1*HZ +static void defense_work_handler(struct work_struct *work); +static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); static void defense_work_handler(struct work_struct *work) { - struct netns_ipvs *ipvs = - container_of(work, struct netns_ipvs, defense_work.work); + update_defense_level(); + if (atomic_read(&ip_vs_dropentry)) + ip_vs_random_dropentry(); - update_defense_level(ipvs); - if (atomic_read(&ipvs->dropentry)) - ip_vs_random_dropentry(ipvs->net); - schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD); + schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD); } int @@ -255,13 +287,33 @@ static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE]; /* the service table hashed by fwmark */ static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE]; +/* + * Hash table: for real service lookups + */ +#define IP_VS_RTAB_BITS 4 +#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) +#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) + +static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE]; + +/* + * Trash for destinations + */ +static LIST_HEAD(ip_vs_dest_trash); + +/* + * FTP & NULL virtual service counters + */ +static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0); +static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0); + /* * Returns hash value for virtual service */ -static inline unsigned -ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, - const union nf_inet_addr *addr, __be16 port) +static __inline__ unsigned +ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr, + __be16 port) { register unsigned porth = ntohs(port); __be32 addr_fold = addr->ip; @@ -271,7 +323,6 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif - addr_fold ^= ((size_t)net>>8); return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth) & IP_VS_SVC_TAB_MASK; @@ -280,13 +331,13 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, /* * Returns hash value of fwmark for virtual service lookup */ -static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) +static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark) { - return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK; + return fwmark & IP_VS_SVC_TAB_MASK; } /* - * Hashes a service in the 
ip_vs_svc_table by + * Hashes a service in the ip_vs_svc_table by * or in the ip_vs_svc_fwm_table by fwmark. * Should be called with locked tables. */ @@ -302,16 +353,16 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc) if (svc->fwmark == 0) { /* - * Hash it by in ip_vs_svc_table + * Hash it by in ip_vs_svc_table */ - hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol, - &svc->addr, svc->port); + hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr, + svc->port); list_add(&svc->s_list, &ip_vs_svc_table[hash]); } else { /* - * Hash it by fwmark in svc_fwm_table + * Hash it by fwmark in ip_vs_svc_fwm_table */ - hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark); + hash = ip_vs_svc_fwm_hashkey(svc->fwmark); list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]); } @@ -323,7 +374,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc) /* - * Unhashes a service from svc_table / svc_fwm_table. + * Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table. * Should be called with locked tables. */ static int ip_vs_svc_unhash(struct ip_vs_service *svc) @@ -335,10 +386,10 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc) } if (svc->fwmark == 0) { - /* Remove it from the svc_table table */ + /* Remove it from the ip_vs_svc_table table */ list_del(&svc->s_list); } else { - /* Remove it from the svc_fwm_table table */ + /* Remove it from the ip_vs_svc_fwm_table table */ list_del(&svc->f_list); } @@ -349,24 +400,23 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc) /* - * Get service by {netns, proto,addr,port} in the service table. + * Get service by {proto,addr,port} in the service table. */ static inline struct ip_vs_service * -__ip_vs_service_find(struct net *net, int af, __u16 protocol, - const union nf_inet_addr *vaddr, __be16 vport) +__ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr, + __be16 vport) { unsigned hash; struct ip_vs_service *svc; /* Check for "full" addressed entries */ - hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport); + hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport); list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ if ((svc->af == af) && ip_vs_addr_equal(af, &svc->addr, vaddr) && (svc->port == vport) - && (svc->protocol == protocol) - && net_eq(svc->net, net)) { + && (svc->protocol == protocol)) { /* HIT */ return svc; } @@ -380,17 +430,16 @@ __ip_vs_service_find(struct net *net, int af, __u16 protocol, * Get service by {fwmark} in the service table. 
*/ static inline struct ip_vs_service * -__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark) +__ip_vs_svc_fwm_find(int af, __u32 fwmark) { unsigned hash; struct ip_vs_service *svc; /* Check for fwmark addressed entries */ - hash = ip_vs_svc_fwm_hashkey(net, fwmark); + hash = ip_vs_svc_fwm_hashkey(fwmark); list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { - if (svc->fwmark == fwmark && svc->af == af - && net_eq(svc->net, net)) { + if (svc->fwmark == fwmark && svc->af == af) { /* HIT */ return svc; } @@ -400,44 +449,42 @@ __ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark) } struct ip_vs_service * -ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol, +ip_vs_service_get(int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport) { struct ip_vs_service *svc; - struct netns_ipvs *ipvs = net_ipvs(net); read_lock(&__ip_vs_svc_lock); /* * Check the table hashed by fwmark first */ - svc = __ip_vs_svc_fwm_find(net, af, fwmark); - if (fwmark && svc) + if (fwmark && (svc = __ip_vs_svc_fwm_find(af, fwmark))) goto out; /* * Check the table hashed by * for "full" addressed entries */ - svc = __ip_vs_service_find(net, af, protocol, vaddr, vport); + svc = __ip_vs_service_find(af, protocol, vaddr, vport); if (svc == NULL && protocol == IPPROTO_TCP - && atomic_read(&ipvs->ftpsvc_counter) + && atomic_read(&ip_vs_ftpsvc_counter) && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) { /* * Check if ftp service entry exists, the packet * might belong to FTP data connections. */ - svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT); + svc = __ip_vs_service_find(af, protocol, vaddr, FTPPORT); } if (svc == NULL - && atomic_read(&ipvs->nullsvc_counter)) { + && atomic_read(&ip_vs_nullsvc_counter)) { /* * Check if the catch-all port (port zero) exists */ - svc = __ip_vs_service_find(net, af, protocol, vaddr, 0); + svc = __ip_vs_service_find(af, protocol, vaddr, 0); } out: @@ -472,7 +519,6 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest) svc->fwmark, IP_VS_DBG_ADDR(svc->af, &svc->addr), ntohs(svc->port), atomic_read(&svc->usecnt)); - free_percpu(svc->stats.cpustats); kfree(svc); } } @@ -499,10 +545,10 @@ static inline unsigned ip_vs_rs_hashkey(int af, } /* - * Hashes ip_vs_dest in rs_table by . + * Hashes ip_vs_dest in ip_vs_rtable by . * should be called with locked tables. */ -static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) +static int ip_vs_rs_hash(struct ip_vs_dest *dest) { unsigned hash; @@ -516,19 +562,19 @@ static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) */ hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port); - list_add(&dest->d_list, &ipvs->rs_table[hash]); + list_add(&dest->d_list, &ip_vs_rtable[hash]); return 1; } /* - * UNhashes ip_vs_dest from rs_table. + * UNhashes ip_vs_dest from ip_vs_rtable. * should be called with locked tables. */ static int ip_vs_rs_unhash(struct ip_vs_dest *dest) { /* - * Remove it from the rs_table table. + * Remove it from the ip_vs_rtable table. */ if (!list_empty(&dest->d_list)) { list_del(&dest->d_list); @@ -542,11 +588,10 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest) * Lookup real service by in the real service table. 
*/ struct ip_vs_dest * -ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol, +ip_vs_lookup_real_service(int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport) { - struct netns_ipvs *ipvs = net_ipvs(net); unsigned hash; struct ip_vs_dest *dest; @@ -556,19 +601,19 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol, */ hash = ip_vs_rs_hashkey(af, daddr, dport); - read_lock(&ipvs->rs_lock); - list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) { + read_lock(&__ip_vs_rs_lock); + list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { if ((dest->af == af) && ip_vs_addr_equal(af, &dest->addr, daddr) && (dest->port == dport) && ((dest->protocol == protocol) || dest->vfwmark)) { /* HIT */ - read_unlock(&ipvs->rs_lock); + read_unlock(&__ip_vs_rs_lock); return dest; } } - read_unlock(&ipvs->rs_lock); + read_unlock(&__ip_vs_rs_lock); return NULL; } @@ -607,16 +652,15 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, * ip_vs_lookup_real_service() looked promissing, but * seems not working as expected. */ -struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af, - const union nf_inet_addr *daddr, +struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, - __be16 vport, __u16 protocol, __u32 fwmark) + __be16 vport, __u16 protocol) { struct ip_vs_dest *dest; struct ip_vs_service *svc; - svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport); + svc = ip_vs_service_get(af, 0, protocol, vaddr, vport); if (!svc) return NULL; dest = ip_vs_lookup_dest(svc, daddr, dport); @@ -641,12 +685,11 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, __be16 dport) { struct ip_vs_dest *dest, *nxt; - struct netns_ipvs *ipvs = net_ipvs(svc->net); /* * Find the destination in trash */ - list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) { + list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, " "dest->refcnt=%d\n", dest->vfwmark, @@ -677,7 +720,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, list_del(&dest->n_list); ip_vs_dst_reset(dest); __ip_vs_unbind_svc(dest); - free_percpu(dest->stats.cpustats); kfree(dest); } } @@ -695,16 +737,14 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, * are expired, and the refcnt of each destination in the trash must * be 1, so we simply release them here. */ -static void ip_vs_trash_cleanup(struct net *net) +static void ip_vs_trash_cleanup(void) { struct ip_vs_dest *dest, *nxt; - struct netns_ipvs *ipvs = net_ipvs(net); - list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) { + list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { list_del(&dest->n_list); ip_vs_dst_reset(dest); __ip_vs_unbind_svc(dest); - free_percpu(dest->stats.cpustats); kfree(dest); } } @@ -728,7 +768,6 @@ static void __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest, int add) { - struct netns_ipvs *ipvs = net_ipvs(svc->net); int conn_flags; /* set the weight and the flags */ @@ -741,12 +780,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, conn_flags |= IP_VS_CONN_F_NOOUTPUT; } else { /* - * Put the real service in rs_table if not present. + * Put the real service in ip_vs_rtable if not present. * For now only for NAT! 
*/ - write_lock_bh(&ipvs->rs_lock); - ip_vs_rs_hash(ipvs, dest); - write_unlock_bh(&ipvs->rs_lock); + write_lock_bh(&__ip_vs_rs_lock); + ip_vs_rs_hash(dest); + write_unlock_bh(&__ip_vs_rs_lock); } atomic_set(&dest->conn_flags, conn_flags); @@ -774,7 +813,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, spin_unlock(&dest->dst_lock); if (add) - ip_vs_new_estimator(svc->net, &dest->stats); + ip_vs_new_estimator(&dest->stats); write_lock_bh(&__ip_vs_svc_lock); @@ -811,12 +850,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, atype = ipv6_addr_type(&udest->addr.in6); if ((!(atype & IPV6_ADDR_UNICAST) || atype & IPV6_ADDR_LINKLOCAL) && - !__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6)) + !__ip_vs_addr_is_local_v6(&udest->addr.in6)) return -EINVAL; } else #endif { - atype = inet_addr_type(svc->net, udest->addr.ip); + atype = inet_addr_type(&init_net, udest->addr.ip); if (atype != RTN_LOCAL && atype != RTN_UNICAST) return -EINVAL; } @@ -826,11 +865,6 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, pr_err("%s(): no memory.\n", __func__); return -ENOMEM; } - dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!dest->stats.cpustats) { - pr_err("%s() alloc_percpu failed\n", __func__); - goto err_alloc; - } dest->af = svc->af; dest->protocol = svc->protocol; @@ -854,10 +888,6 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, LeaveFunction(2); return 0; - -err_alloc: - kfree(dest); - return -ENOMEM; } @@ -976,18 +1006,16 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) /* * Delete a destination (must be already unlinked from the service) */ -static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest) +static void __ip_vs_del_dest(struct ip_vs_dest *dest) { - struct netns_ipvs *ipvs = net_ipvs(net); - - ip_vs_kill_estimator(net, &dest->stats); + ip_vs_kill_estimator(&dest->stats); /* * Remove it from the d-linked list with the real services. 
*/ - write_lock_bh(&ipvs->rs_lock); + write_lock_bh(&__ip_vs_rs_lock); ip_vs_rs_unhash(dest); - write_unlock_bh(&ipvs->rs_lock); + write_unlock_bh(&__ip_vs_rs_lock); /* * Decrease the refcnt of the dest, and free the dest @@ -1006,7 +1034,6 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest) and only one user context can update virtual service at a time, so the operation here is OK */ atomic_dec(&dest->svc->refcnt); - free_percpu(dest->stats.cpustats); kfree(dest); } else { IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, " @@ -1014,7 +1041,7 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest) IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->refcnt)); - list_add(&dest->n_list, &ipvs->dest_trash); + list_add(&dest->n_list, &ip_vs_dest_trash); atomic_inc(&dest->refcnt); } } @@ -1078,7 +1105,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) /* * Delete the destination */ - __ip_vs_del_dest(svc->net, dest); + __ip_vs_del_dest(dest); LeaveFunction(2); @@ -1090,14 +1117,13 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) * Add a service into the service hash table */ static int -ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, +ip_vs_add_service(struct ip_vs_service_user_kern *u, struct ip_vs_service **svc_p) { int ret = 0; struct ip_vs_scheduler *sched = NULL; struct ip_vs_pe *pe = NULL; struct ip_vs_service *svc = NULL; - struct netns_ipvs *ipvs = net_ipvs(net); /* increase the module use count */ ip_vs_use_count_inc(); @@ -1111,7 +1137,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, } if (u->pe_name && *u->pe_name) { - pe = ip_vs_pe_getbyname(u->pe_name); + pe = ip_vs_pe_get(u->pe_name); if (pe == NULL) { pr_info("persistence engine module ip_vs_pe_%s " "not found\n", u->pe_name); @@ -1133,11 +1159,6 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, ret = -ENOMEM; goto out_err; } - svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!svc->stats.cpustats) { - pr_err("%s() alloc_percpu failed\n", __func__); - goto out_err; - } /* I'm the first user of the service */ atomic_set(&svc->usecnt, 0); @@ -1151,7 +1172,6 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, svc->flags = u->flags; svc->timeout = u->timeout * HZ; svc->netmask = u->netmask; - svc->net = net; INIT_LIST_HEAD(&svc->destinations); rwlock_init(&svc->sched_lock); @@ -1169,15 +1189,15 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, /* Update the virtual service counters */ if (svc->port == FTPPORT) - atomic_inc(&ipvs->ftpsvc_counter); + atomic_inc(&ip_vs_ftpsvc_counter); else if (svc->port == 0) - atomic_inc(&ipvs->nullsvc_counter); + atomic_inc(&ip_vs_nullsvc_counter); - ip_vs_new_estimator(net, &svc->stats); + ip_vs_new_estimator(&svc->stats); /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) - ipvs->num_services++; + ip_vs_num_services++; /* Hash the service into the service table */ write_lock_bh(&__ip_vs_svc_lock); @@ -1187,7 +1207,6 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, *svc_p = svc; return 0; - out_err: if (svc != NULL) { ip_vs_unbind_scheduler(svc); @@ -1196,8 +1215,6 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, ip_vs_app_inc_put(svc->inc); local_bh_enable(); } - if (svc->stats.cpustats) - free_percpu(svc->stats.cpustats); kfree(svc); } 
ip_vs_scheduler_put(sched); @@ -1231,7 +1248,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) old_sched = sched; if (u->pe_name && *u->pe_name) { - pe = ip_vs_pe_getbyname(u->pe_name); + pe = ip_vs_pe_get(u->pe_name); if (pe == NULL) { pr_info("persistence engine module ip_vs_pe_%s " "not found\n", u->pe_name); @@ -1317,15 +1334,14 @@ static void __ip_vs_del_service(struct ip_vs_service *svc) struct ip_vs_dest *dest, *nxt; struct ip_vs_scheduler *old_sched; struct ip_vs_pe *old_pe; - struct netns_ipvs *ipvs = net_ipvs(svc->net); pr_info("%s: enter\n", __func__); /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) - ipvs->num_services--; + ip_vs_num_services--; - ip_vs_kill_estimator(svc->net, &svc->stats); + ip_vs_kill_estimator(&svc->stats); /* Unbind scheduler */ old_sched = svc->scheduler; @@ -1348,16 +1364,16 @@ static void __ip_vs_del_service(struct ip_vs_service *svc) */ list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) { __ip_vs_unlink_dest(svc, dest, 0); - __ip_vs_del_dest(svc->net, dest); + __ip_vs_del_dest(dest); } /* * Update the virtual service counters */ if (svc->port == FTPPORT) - atomic_dec(&ipvs->ftpsvc_counter); + atomic_dec(&ip_vs_ftpsvc_counter); else if (svc->port == 0) - atomic_dec(&ipvs->nullsvc_counter); + atomic_dec(&ip_vs_nullsvc_counter); /* * Free the service if nobody refers to it @@ -1367,7 +1383,6 @@ static void __ip_vs_del_service(struct ip_vs_service *svc) svc->fwmark, IP_VS_DBG_ADDR(svc->af, &svc->addr), ntohs(svc->port), atomic_read(&svc->usecnt)); - free_percpu(svc->stats.cpustats); kfree(svc); } @@ -1413,19 +1428,17 @@ static int ip_vs_del_service(struct ip_vs_service *svc) /* * Flush all the virtual services */ -static int ip_vs_flush(struct net *net) +static int ip_vs_flush(void) { int idx; struct ip_vs_service *svc, *nxt; /* - * Flush the service table hashed by + * Flush the service table hashed by */ for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { - list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], - s_list) { - if (net_eq(svc->net, net)) - ip_vs_unlink_service(svc); + list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) { + ip_vs_unlink_service(svc); } } @@ -1435,8 +1448,7 @@ static int ip_vs_flush(struct net *net) for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry_safe(svc, nxt, &ip_vs_svc_fwm_table[idx], f_list) { - if (net_eq(svc->net, net)) - ip_vs_unlink_service(svc); + ip_vs_unlink_service(svc); } } @@ -1460,26 +1472,24 @@ static int ip_vs_zero_service(struct ip_vs_service *svc) return 0; } -static int ip_vs_zero_all(struct net *net) +static int ip_vs_zero_all(void) { int idx; struct ip_vs_service *svc; for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { - if (net_eq(svc->net, net)) - ip_vs_zero_service(svc); + ip_vs_zero_service(svc); } } for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { - if (net_eq(svc->net, net)) - ip_vs_zero_service(svc); + ip_vs_zero_service(svc); } } - ip_vs_zero_stats(net_ipvs(net)->tot_stats); + ip_vs_zero_stats(&ip_vs_stats); return 0; } @@ -1488,7 +1498,6 @@ static int proc_do_defense_mode(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; int *valp = table->data; int val = *valp; int rc; @@ -1499,7 +1508,7 @@ proc_do_defense_mode(ctl_table *table, int write, /* Restore the correct value */ *valp = val; } 
else { - update_defense_level(net_ipvs(net)); + update_defense_level(); } } return rc; @@ -1525,54 +1534,45 @@ proc_do_sync_threshold(ctl_table *table, int write, return rc; } -static int -proc_do_sync_mode(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - int *valp = table->data; - int val = *valp; - int rc; - - rc = proc_dointvec(table, write, buffer, lenp, ppos); - if (write && (*valp != val)) { - if ((*valp < 0) || (*valp > 1)) { - /* Restore the correct value */ - *valp = val; - } else { - struct net *net = current->nsproxy->net_ns; - ip_vs_sync_switch_mode(net, val); - } - } - return rc; -} /* * IPVS sysctl table (under the /proc/sys/net/ipv4/vs/) - * Do not change order or insert new entries without - * align with netns init in __ip_vs_control_init() */ static struct ctl_table vs_vars[] = { { .procname = "amemthresh", + .data = &sysctl_ip_vs_amemthresh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#ifdef CONFIG_IP_VS_DEBUG + { + .procname = "debug_level", + .data = &sysctl_ip_vs_debug_level, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, +#endif { .procname = "am_droprate", + .data = &sysctl_ip_vs_am_droprate, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "drop_entry", + .data = &sysctl_ip_vs_drop_entry, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_defense_mode, }, { .procname = "drop_packet", + .data = &sysctl_ip_vs_drop_packet, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_defense_mode, @@ -1580,6 +1580,7 @@ static struct ctl_table vs_vars[] = { #ifdef CONFIG_IP_VS_NFCT { .procname = "conntrack", + .data = &sysctl_ip_vs_conntrack, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec, @@ -1587,62 +1588,18 @@ static struct ctl_table vs_vars[] = { #endif { .procname = "secure_tcp", + .data = &sysctl_ip_vs_secure_tcp, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_defense_mode, }, { .procname = "snat_reroute", + .data = &sysctl_ip_vs_snat_reroute, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec, }, - { - .procname = "sync_version", - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_do_sync_mode, - }, - { - .procname = "cache_bypass", - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "expire_nodest_conn", - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "expire_quiescent_template", - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "sync_threshold", - .maxlen = - sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold), - .mode = 0644, - .proc_handler = proc_do_sync_threshold, - }, - { - .procname = "nat_icmp_send", - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, -#ifdef CONFIG_IP_VS_DEBUG - { - .procname = "debug_level", - .data = &sysctl_ip_vs_debug_level, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, -#endif #if 0 { .procname = "timeout_established", @@ -1729,6 +1686,41 @@ static struct ctl_table vs_vars[] = { .proc_handler = proc_dointvec_jiffies, }, #endif + { + .procname = "cache_bypass", + .data = &sysctl_ip_vs_cache_bypass, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "expire_nodest_conn", + .data = &sysctl_ip_vs_expire_nodest_conn, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = 
proc_dointvec, + }, + { + .procname = "expire_quiescent_template", + .data = &sysctl_ip_vs_expire_quiescent_template, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sync_threshold", + .data = &sysctl_ip_vs_sync_threshold, + .maxlen = sizeof(sysctl_ip_vs_sync_threshold), + .mode = 0644, + .proc_handler = proc_do_sync_threshold, + }, + { + .procname = "nat_icmp_send", + .data = &sysctl_ip_vs_nat_icmp_send, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { } }; @@ -1740,10 +1732,11 @@ const struct ctl_path net_vs_ctl_path[] = { }; EXPORT_SYMBOL_GPL(net_vs_ctl_path); +static struct ctl_table_header * sysctl_header; + #ifdef CONFIG_PROC_FS struct ip_vs_iter { - struct seq_net_private p; /* Do not move this, netns depends upon it*/ struct list_head *table; int bucket; }; @@ -1770,7 +1763,6 @@ static inline const char *ip_vs_fwd_name(unsigned flags) /* Get the Nth entry in the two lists */ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos) { - struct net *net = seq_file_net(seq); struct ip_vs_iter *iter = seq->private; int idx; struct ip_vs_service *svc; @@ -1778,7 +1770,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos) /* look in hash by protocol */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { - if (net_eq(svc->net, net) && pos-- == 0) { + if (pos-- == 0){ iter->table = ip_vs_svc_table; iter->bucket = idx; return svc; @@ -1789,7 +1781,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos) /* keep looking in fwmark */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { - if (net_eq(svc->net, net) && pos-- == 0) { + if (pos-- == 0) { iter->table = ip_vs_svc_fwm_table; iter->bucket = idx; return svc; @@ -1943,7 +1935,7 @@ static const struct seq_operations ip_vs_info_seq_ops = { static int ip_vs_info_open(struct inode *inode, struct file *file) { - return seq_open_net(inode, file, &ip_vs_info_seq_ops, + return seq_open_private(file, &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)); } @@ -1957,11 +1949,13 @@ static const struct file_operations ip_vs_info_fops = { #endif +struct ip_vs_stats ip_vs_stats = { + .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock), +}; + #ifdef CONFIG_PROC_FS static int ip_vs_stats_show(struct seq_file *seq, void *v) { - struct net *net = seq_file_single_net(seq); - struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats; /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, @@ -1969,29 +1963,29 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v) seq_printf(seq, " Conns Packets Packets Bytes Bytes\n"); - spin_lock_bh(&tot_stats->lock); - seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns, - tot_stats->ustats.inpkts, tot_stats->ustats.outpkts, - (unsigned long long) tot_stats->ustats.inbytes, - (unsigned long long) tot_stats->ustats.outbytes); + spin_lock_bh(&ip_vs_stats.lock); + seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns, + ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts, + (unsigned long long) ip_vs_stats.ustats.inbytes, + (unsigned long long) ip_vs_stats.ustats.outbytes); /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); seq_printf(seq,"%8X %8X %8X %16X %16X\n", - tot_stats->ustats.cps, - tot_stats->ustats.inpps, - 
tot_stats->ustats.outpps, - tot_stats->ustats.inbps, - tot_stats->ustats.outbps); - spin_unlock_bh(&tot_stats->lock); + ip_vs_stats.ustats.cps, + ip_vs_stats.ustats.inpps, + ip_vs_stats.ustats.outpps, + ip_vs_stats.ustats.inbps, + ip_vs_stats.ustats.outbps); + spin_unlock_bh(&ip_vs_stats.lock); return 0; } static int ip_vs_stats_seq_open(struct inode *inode, struct file *file) { - return single_open_net(inode, file, ip_vs_stats_show); + return single_open(file, ip_vs_stats_show, NULL); } static const struct file_operations ip_vs_stats_fops = { @@ -2002,70 +1996,13 @@ static const struct file_operations ip_vs_stats_fops = { .release = single_release, }; -static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) -{ - struct net *net = seq_file_single_net(seq); - struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats; - int i; - -/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ - seq_puts(seq, - " Total Incoming Outgoing Incoming Outgoing\n"); - seq_printf(seq, - "CPU Conns Packets Packets Bytes Bytes\n"); - - for_each_possible_cpu(i) { - struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i); - seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n", - i, u->ustats.conns, u->ustats.inpkts, - u->ustats.outpkts, (__u64)u->ustats.inbytes, - (__u64)u->ustats.outbytes); - } - - spin_lock_bh(&tot_stats->lock); - seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n", - tot_stats->ustats.conns, tot_stats->ustats.inpkts, - tot_stats->ustats.outpkts, - (unsigned long long) tot_stats->ustats.inbytes, - (unsigned long long) tot_stats->ustats.outbytes); - -/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ - seq_puts(seq, - " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); - seq_printf(seq, " %8X %8X %8X %16X %16X\n", - tot_stats->ustats.cps, - tot_stats->ustats.inpps, - tot_stats->ustats.outpps, - tot_stats->ustats.inbps, - tot_stats->ustats.outbps); - spin_unlock_bh(&tot_stats->lock); - - return 0; -} - -static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file) -{ - return single_open_net(inode, file, ip_vs_stats_percpu_show); -} - -static const struct file_operations ip_vs_stats_percpu_fops = { - .owner = THIS_MODULE, - .open = ip_vs_stats_percpu_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; #endif /* * Set timeout values for tcp tcpfin udp in the timeout_table. 
*/ -static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u) +static int ip_vs_set_timeout(struct ip_vs_timeout_user *u) { -#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) - struct ip_vs_proto_data *pd; -#endif - IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n", u->tcp_timeout, u->tcp_fin_timeout, @@ -2073,22 +2010,19 @@ static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u) #ifdef CONFIG_IP_VS_PROTO_TCP if (u->tcp_timeout) { - pd = ip_vs_proto_data_get(net, IPPROTO_TCP); - pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] + ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] = u->tcp_timeout * HZ; } if (u->tcp_fin_timeout) { - pd = ip_vs_proto_data_get(net, IPPROTO_TCP); - pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] + ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] = u->tcp_fin_timeout * HZ; } #endif #ifdef CONFIG_IP_VS_PROTO_UDP if (u->udp_timeout) { - pd = ip_vs_proto_data_get(net, IPPROTO_UDP); - pd->timeout_table[IP_VS_UDP_S_NORMAL] + ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] = u->udp_timeout * HZ; } #endif @@ -2153,7 +2087,6 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, static int do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { - struct net *net = sock_net(sk); int ret; unsigned char arg[MAX_ARG_LEN]; struct ip_vs_service_user *usvc_compat; @@ -2188,20 +2121,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) if (cmd == IP_VS_SO_SET_FLUSH) { /* Flush the virtual service */ - ret = ip_vs_flush(net); + ret = ip_vs_flush(); goto out_unlock; } else if (cmd == IP_VS_SO_SET_TIMEOUT) { /* Set timeout values for (tcp tcpfin udp) */ - ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); + ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg); goto out_unlock; } else if (cmd == IP_VS_SO_SET_STARTDAEMON) { struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; - ret = start_sync_thread(net, dm->state, dm->mcast_ifn, - dm->syncid); + ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid); goto out_unlock; } else if (cmd == IP_VS_SO_SET_STOPDAEMON) { struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; - ret = stop_sync_thread(net, dm->state); + ret = stop_sync_thread(dm->state); goto out_unlock; } @@ -2216,7 +2148,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) if (cmd == IP_VS_SO_SET_ZERO) { /* if no service address is set, zero counters in all */ if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) { - ret = ip_vs_zero_all(net); + ret = ip_vs_zero_all(); goto out_unlock; } } @@ -2233,10 +2165,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) /* Lookup the exact service by or fwmark */ if (usvc.fwmark == 0) - svc = __ip_vs_service_find(net, usvc.af, usvc.protocol, + svc = __ip_vs_service_find(usvc.af, usvc.protocol, &usvc.addr, usvc.port); else - svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark); + svc = __ip_vs_svc_fwm_find(usvc.af, usvc.fwmark); if (cmd != IP_VS_SO_SET_ADD && (svc == NULL || svc->protocol != usvc.protocol)) { @@ -2249,7 +2181,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) if (svc != NULL) ret = -EEXIST; else - ret = ip_vs_add_service(net, &usvc, &svc); + ret = ip_vs_add_service(&usvc, &svc); break; case IP_VS_SO_SET_EDIT: ret = ip_vs_edit_service(svc, &usvc); @@ -2309,8 +2241,7 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct 
ip_vs_service *src) } static inline int -__ip_vs_get_service_entries(struct net *net, - const struct ip_vs_get_services *get, +__ip_vs_get_service_entries(const struct ip_vs_get_services *get, struct ip_vs_get_services __user *uptr) { int idx, count=0; @@ -2321,7 +2252,7 @@ __ip_vs_get_service_entries(struct net *net, for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { /* Only expose IPv4 entries to old interface */ - if (svc->af != AF_INET || !net_eq(svc->net, net)) + if (svc->af != AF_INET) continue; if (count >= get->num_services) @@ -2340,7 +2271,7 @@ __ip_vs_get_service_entries(struct net *net, for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { /* Only expose IPv4 entries to old interface */ - if (svc->af != AF_INET || !net_eq(svc->net, net)) + if (svc->af != AF_INET) continue; if (count >= get->num_services) @@ -2360,7 +2291,7 @@ __ip_vs_get_service_entries(struct net *net, } static inline int -__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, +__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get, struct ip_vs_get_dests __user *uptr) { struct ip_vs_service *svc; @@ -2368,9 +2299,9 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, int ret = 0; if (get->fwmark) - svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark); + svc = __ip_vs_svc_fwm_find(AF_INET, get->fwmark); else - svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr, + svc = __ip_vs_service_find(AF_INET, get->protocol, &addr, get->port); if (svc) { @@ -2405,21 +2336,17 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, } static inline void -__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u) +__ip_vs_get_timeouts(struct ip_vs_timeout_user *u) { -#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) - struct ip_vs_proto_data *pd; -#endif - #ifdef CONFIG_IP_VS_PROTO_TCP - pd = ip_vs_proto_data_get(net, IPPROTO_TCP); - u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; - u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ; + u->tcp_timeout = + ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; + u->tcp_fin_timeout = + ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ; #endif #ifdef CONFIG_IP_VS_PROTO_UDP - pd = ip_vs_proto_data_get(net, IPPROTO_UDP); u->udp_timeout = - pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ; + ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ; #endif } @@ -2448,10 +2375,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) unsigned char arg[128]; int ret = 0; unsigned int copylen; - struct net *net = sock_net(sk); - struct netns_ipvs *ipvs = net_ipvs(net); - BUG_ON(!net); if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -2494,7 +2418,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) struct ip_vs_getinfo info; info.version = IP_VS_VERSION_CODE; info.size = ip_vs_conn_tab_size; - info.num_services = ipvs->num_services; + info.num_services = ip_vs_num_services; if (copy_to_user(user, &info, sizeof(info)) != 0) ret = -EFAULT; } @@ -2513,7 +2437,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) ret = -EINVAL; goto out; } - ret = __ip_vs_get_service_entries(net, get, user); + ret = __ip_vs_get_service_entries(get, user); } break; @@ -2526,11 +2450,10 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) entry = 
(struct ip_vs_service_entry *)arg; addr.ip = entry->addr; if (entry->fwmark) - svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark); + svc = __ip_vs_svc_fwm_find(AF_INET, entry->fwmark); else - svc = __ip_vs_service_find(net, AF_INET, - entry->protocol, &addr, - entry->port); + svc = __ip_vs_service_find(AF_INET, entry->protocol, + &addr, entry->port); if (svc) { ip_vs_copy_service(entry, svc); if (copy_to_user(user, entry, sizeof(*entry)) != 0) @@ -2553,7 +2476,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) ret = -EINVAL; goto out; } - ret = __ip_vs_get_dest_entries(net, get, user); + ret = __ip_vs_get_dest_entries(get, user); } break; @@ -2561,7 +2484,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { struct ip_vs_timeout_user t; - __ip_vs_get_timeouts(net, &t); + __ip_vs_get_timeouts(&t); if (copy_to_user(user, &t, sizeof(t)) != 0) ret = -EFAULT; } @@ -2572,17 +2495,15 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) struct ip_vs_daemon_user d[2]; memset(&d, 0, sizeof(d)); - if (ipvs->sync_state & IP_VS_STATE_MASTER) { + if (ip_vs_sync_state & IP_VS_STATE_MASTER) { d[0].state = IP_VS_STATE_MASTER; - strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, - sizeof(d[0].mcast_ifn)); - d[0].syncid = ipvs->master_syncid; + strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn)); + d[0].syncid = ip_vs_master_syncid; } - if (ipvs->sync_state & IP_VS_STATE_BACKUP) { + if (ip_vs_sync_state & IP_VS_STATE_BACKUP) { d[1].state = IP_VS_STATE_BACKUP; - strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, - sizeof(d[1].mcast_ifn)); - d[1].syncid = ipvs->backup_syncid; + strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn)); + d[1].syncid = ip_vs_backup_syncid; } if (copy_to_user(user, &d, sizeof(d)) != 0) ret = -EFAULT; @@ -2621,7 +2542,6 @@ static struct genl_family ip_vs_genl_family = { .name = IPVS_GENL_NAME, .version = IPVS_GENL_VERSION, .maxattr = IPVS_CMD_MAX, - .netnsok = true, /* Make ipvsadm to work on netns */ }; /* Policy used for first-level command attributes */ @@ -2776,12 +2696,11 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb, int idx = 0, i; int start = cb->args[0]; struct ip_vs_service *svc; - struct net *net = skb_sknet(skb); mutex_lock(&__ip_vs_mutex); for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) { - if (++idx <= start || !net_eq(svc->net, net)) + if (++idx <= start) continue; if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { idx--; @@ -2792,7 +2711,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb, for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) { - if (++idx <= start || !net_eq(svc->net, net)) + if (++idx <= start) continue; if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { idx--; @@ -2808,8 +2727,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb, return skb->len; } -static int ip_vs_genl_parse_service(struct net *net, - struct ip_vs_service_user_kern *usvc, +static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc, struct nlattr *nla, int full_entry, struct ip_vs_service **ret_svc) { @@ -2852,9 +2770,9 @@ static int ip_vs_genl_parse_service(struct net *net, } if (usvc->fwmark) - svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark); + svc = __ip_vs_svc_fwm_find(usvc->af, usvc->fwmark); else - svc = __ip_vs_service_find(net, usvc->af, usvc->protocol, + svc = __ip_vs_service_find(usvc->af, usvc->protocol, &usvc->addr, 
usvc->port); *ret_svc = svc; @@ -2891,14 +2809,13 @@ static int ip_vs_genl_parse_service(struct net *net, return 0; } -static struct ip_vs_service *ip_vs_genl_find_service(struct net *net, - struct nlattr *nla) +static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla) { struct ip_vs_service_user_kern usvc; struct ip_vs_service *svc; int ret; - ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc); + ret = ip_vs_genl_parse_service(&usvc, nla, 0, &svc); return ret ? ERR_PTR(ret) : svc; } @@ -2966,7 +2883,6 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb, struct ip_vs_service *svc; struct ip_vs_dest *dest; struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1]; - struct net *net = skb_sknet(skb); mutex_lock(&__ip_vs_mutex); @@ -2975,8 +2891,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb, IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy)) goto out_err; - - svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]); + svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]); if (IS_ERR(svc) || svc == NULL) goto out_err; @@ -3090,23 +3005,20 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state, static int ip_vs_genl_dump_daemons(struct sk_buff *skb, struct netlink_callback *cb) { - struct net *net = skb_net(skb); - struct netns_ipvs *ipvs = net_ipvs(net); - mutex_lock(&__ip_vs_mutex); - if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { + if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, - ipvs->master_mcast_ifn, - ipvs->master_syncid, cb) < 0) + ip_vs_master_mcast_ifn, + ip_vs_master_syncid, cb) < 0) goto nla_put_failure; cb->args[0] = 1; } - if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) { + if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP, - ipvs->backup_mcast_ifn, - ipvs->backup_syncid, cb) < 0) + ip_vs_backup_mcast_ifn, + ip_vs_backup_syncid, cb) < 0) goto nla_put_failure; cb->args[1] = 1; @@ -3118,33 +3030,31 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb, return skb->len; } -static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs) +static int ip_vs_genl_new_daemon(struct nlattr **attrs) { if (!(attrs[IPVS_DAEMON_ATTR_STATE] && attrs[IPVS_DAEMON_ATTR_MCAST_IFN] && attrs[IPVS_DAEMON_ATTR_SYNC_ID])) return -EINVAL; - return start_sync_thread(net, - nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]), + return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]), nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID])); } -static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs) +static int ip_vs_genl_del_daemon(struct nlattr **attrs) { if (!attrs[IPVS_DAEMON_ATTR_STATE]) return -EINVAL; - return stop_sync_thread(net, - nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); + return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); } -static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs) +static int ip_vs_genl_set_config(struct nlattr **attrs) { struct ip_vs_timeout_user t; - __ip_vs_get_timeouts(net, &t); + __ip_vs_get_timeouts(&t); if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]) t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]); @@ -3156,7 +3066,7 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs) if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]) t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]); - return ip_vs_set_timeout(net, &t); + return ip_vs_set_timeout(&t); } static 
int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) @@ -3166,20 +3076,16 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) struct ip_vs_dest_user_kern udest; int ret = 0, cmd; int need_full_svc = 0, need_full_dest = 0; - struct net *net; - struct netns_ipvs *ipvs; - net = skb_sknet(skb); - ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; mutex_lock(&__ip_vs_mutex); if (cmd == IPVS_CMD_FLUSH) { - ret = ip_vs_flush(net); + ret = ip_vs_flush(); goto out; } else if (cmd == IPVS_CMD_SET_CONFIG) { - ret = ip_vs_genl_set_config(net, info->attrs); + ret = ip_vs_genl_set_config(info->attrs); goto out; } else if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) { @@ -3195,13 +3101,13 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) } if (cmd == IPVS_CMD_NEW_DAEMON) - ret = ip_vs_genl_new_daemon(net, daemon_attrs); + ret = ip_vs_genl_new_daemon(daemon_attrs); else - ret = ip_vs_genl_del_daemon(net, daemon_attrs); + ret = ip_vs_genl_del_daemon(daemon_attrs); goto out; } else if (cmd == IPVS_CMD_ZERO && !info->attrs[IPVS_CMD_ATTR_SERVICE]) { - ret = ip_vs_zero_all(net); + ret = ip_vs_zero_all(); goto out; } @@ -3211,7 +3117,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE) need_full_svc = 1; - ret = ip_vs_genl_parse_service(net, &usvc, + ret = ip_vs_genl_parse_service(&usvc, info->attrs[IPVS_CMD_ATTR_SERVICE], need_full_svc, &svc); if (ret) @@ -3241,7 +3147,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) switch (cmd) { case IPVS_CMD_NEW_SERVICE: if (svc == NULL) - ret = ip_vs_add_service(net, &usvc, &svc); + ret = ip_vs_add_service(&usvc, &svc); else ret = -EEXIST; break; @@ -3279,11 +3185,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; void *reply; int ret, cmd, reply_cmd; - struct net *net; - struct netns_ipvs *ipvs; - net = skb_sknet(skb); - ipvs = net_ipvs(net); cmd = info->genlhdr->cmd; if (cmd == IPVS_CMD_GET_SERVICE) @@ -3312,8 +3214,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) { struct ip_vs_service *svc; - svc = ip_vs_genl_find_service(net, - info->attrs[IPVS_CMD_ATTR_SERVICE]); + svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]); if (IS_ERR(svc)) { ret = PTR_ERR(svc); goto out_err; @@ -3333,7 +3234,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) { struct ip_vs_timeout_user t; - __ip_vs_get_timeouts(net, &t); + __ip_vs_get_timeouts(&t); #ifdef CONFIG_IP_VS_PROTO_TCP NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, @@ -3479,172 +3380,62 @@ static void ip_vs_genl_unregister(void) /* End of Generic Netlink interface definitions */ -/* - * per netns intit/exit func. 
- */ -int __net_init __ip_vs_control_init(struct net *net) -{ - int idx; - struct netns_ipvs *ipvs = net_ipvs(net); - struct ctl_table *tbl; - - atomic_set(&ipvs->dropentry, 0); - spin_lock_init(&ipvs->dropentry_lock); - spin_lock_init(&ipvs->droppacket_lock); - spin_lock_init(&ipvs->securetcp_lock); - ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock); - - /* Initialize rs_table */ - for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) - INIT_LIST_HEAD(&ipvs->rs_table[idx]); - - INIT_LIST_HEAD(&ipvs->dest_trash); - atomic_set(&ipvs->ftpsvc_counter, 0); - atomic_set(&ipvs->nullsvc_counter, 0); - - /* procfs stats */ - ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL); - if (ipvs->tot_stats == NULL) { - pr_err("%s(): no memory.\n", __func__); - return -ENOMEM; - } - ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!ipvs->cpustats) { - pr_err("%s() alloc_percpu failed\n", __func__); - goto err_alloc; - } - spin_lock_init(&ipvs->tot_stats->lock); - - for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) - INIT_LIST_HEAD(&ipvs->rs_table[idx]); - - proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops); - proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops); - proc_net_fops_create(net, "ip_vs_stats_percpu", 0, - &ip_vs_stats_percpu_fops); - - if (!net_eq(net, &init_net)) { - tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL); - if (tbl == NULL) - goto err_dup; - } else - tbl = vs_vars; - /* Initialize sysctl defaults */ - idx = 0; - ipvs->sysctl_amemthresh = 1024; - tbl[idx++].data = &ipvs->sysctl_amemthresh; - ipvs->sysctl_am_droprate = 10; - tbl[idx++].data = &ipvs->sysctl_am_droprate; - tbl[idx++].data = &ipvs->sysctl_drop_entry; - tbl[idx++].data = &ipvs->sysctl_drop_packet; -#ifdef CONFIG_IP_VS_NFCT - tbl[idx++].data = &ipvs->sysctl_conntrack; -#endif - tbl[idx++].data = &ipvs->sysctl_secure_tcp; - ipvs->sysctl_snat_reroute = 1; - tbl[idx++].data = &ipvs->sysctl_snat_reroute; - ipvs->sysctl_sync_ver = 1; - tbl[idx++].data = &ipvs->sysctl_sync_ver; - tbl[idx++].data = &ipvs->sysctl_cache_bypass; - tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn; - tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template; - ipvs->sysctl_sync_threshold[0] = 3; - ipvs->sysctl_sync_threshold[1] = 50; - tbl[idx].data = &ipvs->sysctl_sync_threshold; - tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); - tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; - - - ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path, - tbl); - if (ipvs->sysctl_hdr == NULL) - goto err_reg; - ip_vs_new_estimator(net, ipvs->tot_stats); - ipvs->sysctl_tbl = tbl; - /* Schedule defense work */ - INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler); - schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD); - return 0; - -err_reg: - if (!net_eq(net, &init_net)) - kfree(tbl); -err_dup: - free_percpu(ipvs->cpustats); -err_alloc: - kfree(ipvs->tot_stats); - return -ENOMEM; -} - -static void __net_exit __ip_vs_control_cleanup(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - ip_vs_trash_cleanup(net); - ip_vs_kill_estimator(net, ipvs->tot_stats); - cancel_delayed_work_sync(&ipvs->defense_work); - cancel_work_sync(&ipvs->defense_work.work); - unregister_net_sysctl_table(ipvs->sysctl_hdr); - proc_net_remove(net, "ip_vs_stats_percpu"); - proc_net_remove(net, "ip_vs_stats"); - proc_net_remove(net, "ip_vs"); - free_percpu(ipvs->cpustats); - kfree(ipvs->tot_stats); -} - -static struct pernet_operations ipvs_control_ops = { - .init = __ip_vs_control_init, - .exit = 
__ip_vs_control_cleanup, -}; int __init ip_vs_control_init(void) { - int idx; int ret; + int idx; EnterFunction(2); - /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */ + /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */ for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { INIT_LIST_HEAD(&ip_vs_svc_table[idx]); INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); } - - ret = register_pernet_subsys(&ipvs_control_ops); - if (ret) { - pr_err("cannot register namespace.\n"); - goto err; + for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++) { + INIT_LIST_HEAD(&ip_vs_rtable[idx]); } - - smp_wmb(); /* Do we really need it now ? */ + smp_wmb(); ret = nf_register_sockopt(&ip_vs_sockopts); if (ret) { pr_err("cannot register sockopt.\n"); - goto err_net; + return ret; } ret = ip_vs_genl_register(); if (ret) { pr_err("cannot register Generic Netlink interface.\n"); nf_unregister_sockopt(&ip_vs_sockopts); - goto err_net; + return ret; } + proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); + proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); + + sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars); + + ip_vs_new_estimator(&ip_vs_stats); + + /* Hook the defense timer */ + schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD); + LeaveFunction(2); return 0; - -err_net: - unregister_pernet_subsys(&ipvs_control_ops); -err: - return ret; } void ip_vs_control_cleanup(void) { EnterFunction(2); - unregister_pernet_subsys(&ipvs_control_ops); + ip_vs_trash_cleanup(); + cancel_delayed_work_sync(&defense_work); + cancel_work_sync(&defense_work.work); + ip_vs_kill_estimator(&ip_vs_stats); + unregister_sysctl_table(sysctl_header); + proc_net_remove(&init_net, "ip_vs_stats"); + proc_net_remove(&init_net, "ip_vs"); ip_vs_genl_unregister(); nf_unregister_sockopt(&ip_vs_sockopts); LeaveFunction(2); diff --git a/trunk/net/netfilter/ipvs/ip_vs_est.c b/trunk/net/netfilter/ipvs/ip_vs_est.c index f560a05c965a..ff28801962e0 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_est.c +++ b/trunk/net/netfilter/ipvs/ip_vs_est.c @@ -8,12 +8,8 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * Changes: Hans Schillstrom - * Network name space (netns) aware. - * Global data moved to netns i.e struct netns_ipvs - * Affected data: est_list and est_lock. - * estimation_timer() runs with timer per netns. - * get_stats()) do the per cpu summing. 
+ * Changes: + * */ #define KMSG_COMPONENT "IPVS" @@ -52,42 +48,11 @@ */ -/* - * Make a summary from each cpu - */ -static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum, - struct ip_vs_cpu_stats *stats) -{ - int i; - - for_each_possible_cpu(i) { - struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); - unsigned int start; - __u64 inbytes, outbytes; - if (i) { - sum->conns += s->ustats.conns; - sum->inpkts += s->ustats.inpkts; - sum->outpkts += s->ustats.outpkts; - do { - start = u64_stats_fetch_begin_bh(&s->syncp); - inbytes = s->ustats.inbytes; - outbytes = s->ustats.outbytes; - } while (u64_stats_fetch_retry_bh(&s->syncp, start)); - sum->inbytes += inbytes; - sum->outbytes += outbytes; - } else { - sum->conns = s->ustats.conns; - sum->inpkts = s->ustats.inpkts; - sum->outpkts = s->ustats.outpkts; - do { - start = u64_stats_fetch_begin_bh(&s->syncp); - sum->inbytes = s->ustats.inbytes; - sum->outbytes = s->ustats.outbytes; - } while (u64_stats_fetch_retry_bh(&s->syncp, start)); - } - } -} +static void estimation_timer(unsigned long arg); +static LIST_HEAD(est_list); +static DEFINE_SPINLOCK(est_lock); +static DEFINE_TIMER(est_timer, estimation_timer, 0, 0); static void estimation_timer(unsigned long arg) { @@ -97,16 +62,11 @@ static void estimation_timer(unsigned long arg) u32 n_inpkts, n_outpkts; u64 n_inbytes, n_outbytes; u32 rate; - struct net *net = (struct net *)arg; - struct netns_ipvs *ipvs; - ipvs = net_ipvs(net); - ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats); - spin_lock(&ipvs->est_lock); - list_for_each_entry(e, &ipvs->est_list, list) { + spin_lock(&est_lock); + list_for_each_entry(e, &est_list, list) { s = container_of(e, struct ip_vs_stats, est); - ip_vs_read_cpu_stats(&s->ustats, s->cpustats); spin_lock(&s->lock); n_conns = s->ustats.conns; n_inpkts = s->ustats.inpkts; @@ -115,39 +75,38 @@ static void estimation_timer(unsigned long arg) n_outbytes = s->ustats.outbytes; /* scaled by 2^10, but divided 2 seconds */ - rate = (n_conns - e->last_conns) << 9; + rate = (n_conns - e->last_conns)<<9; e->last_conns = n_conns; - e->cps += ((long)rate - (long)e->cps) >> 2; - s->ustats.cps = (e->cps + 0x1FF) >> 10; + e->cps += ((long)rate - (long)e->cps)>>2; + s->ustats.cps = (e->cps+0x1FF)>>10; - rate = (n_inpkts - e->last_inpkts) << 9; + rate = (n_inpkts - e->last_inpkts)<<9; e->last_inpkts = n_inpkts; - e->inpps += ((long)rate - (long)e->inpps) >> 2; - s->ustats.inpps = (e->inpps + 0x1FF) >> 10; + e->inpps += ((long)rate - (long)e->inpps)>>2; + s->ustats.inpps = (e->inpps+0x1FF)>>10; - rate = (n_outpkts - e->last_outpkts) << 9; + rate = (n_outpkts - e->last_outpkts)<<9; e->last_outpkts = n_outpkts; - e->outpps += ((long)rate - (long)e->outpps) >> 2; - s->ustats.outpps = (e->outpps + 0x1FF) >> 10; + e->outpps += ((long)rate - (long)e->outpps)>>2; + s->ustats.outpps = (e->outpps+0x1FF)>>10; - rate = (n_inbytes - e->last_inbytes) << 4; + rate = (n_inbytes - e->last_inbytes)<<4; e->last_inbytes = n_inbytes; - e->inbps += ((long)rate - (long)e->inbps) >> 2; - s->ustats.inbps = (e->inbps + 0xF) >> 5; + e->inbps += ((long)rate - (long)e->inbps)>>2; + s->ustats.inbps = (e->inbps+0xF)>>5; - rate = (n_outbytes - e->last_outbytes) << 4; + rate = (n_outbytes - e->last_outbytes)<<4; e->last_outbytes = n_outbytes; - e->outbps += ((long)rate - (long)e->outbps) >> 2; - s->ustats.outbps = (e->outbps + 0xF) >> 5; + e->outbps += ((long)rate - (long)e->outbps)>>2; + s->ustats.outbps = (e->outbps+0xF)>>5; spin_unlock(&s->lock); } - spin_unlock(&ipvs->est_lock); - 
mod_timer(&ipvs->est_timer, jiffies + 2*HZ); + spin_unlock(&est_lock); + mod_timer(&est_timer, jiffies + 2*HZ); } -void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats) +void ip_vs_new_estimator(struct ip_vs_stats *stats) { - struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_estimator *est = &stats->est; INIT_LIST_HEAD(&est->list); @@ -167,19 +126,18 @@ void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats) est->last_outbytes = stats->ustats.outbytes; est->outbps = stats->ustats.outbps<<5; - spin_lock_bh(&ipvs->est_lock); - list_add(&est->list, &ipvs->est_list); - spin_unlock_bh(&ipvs->est_lock); + spin_lock_bh(&est_lock); + list_add(&est->list, &est_list); + spin_unlock_bh(&est_lock); } -void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats) +void ip_vs_kill_estimator(struct ip_vs_stats *stats) { - struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_estimator *est = &stats->est; - spin_lock_bh(&ipvs->est_lock); + spin_lock_bh(&est_lock); list_del(&est->list); - spin_unlock_bh(&ipvs->est_lock); + spin_unlock_bh(&est_lock); } void ip_vs_zero_estimator(struct ip_vs_stats *stats) @@ -199,35 +157,13 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats) est->outbps = 0; } -static int __net_init __ip_vs_estimator_init(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - INIT_LIST_HEAD(&ipvs->est_list); - spin_lock_init(&ipvs->est_lock); - setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net); - mod_timer(&ipvs->est_timer, jiffies + 2 * HZ); - return 0; -} - -static void __net_exit __ip_vs_estimator_exit(struct net *net) -{ - del_timer_sync(&net_ipvs(net)->est_timer); -} -static struct pernet_operations ip_vs_app_ops = { - .init = __ip_vs_estimator_init, - .exit = __ip_vs_estimator_exit, -}; - int __init ip_vs_estimator_init(void) { - int rv; - - rv = register_pernet_subsys(&ip_vs_app_ops); - return rv; + mod_timer(&est_timer, jiffies + 2 * HZ); + return 0; } void ip_vs_estimator_cleanup(void) { - unregister_pernet_subsys(&ip_vs_app_ops); + del_timer_sync(&est_timer); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_ftp.c b/trunk/net/netfilter/ipvs/ip_vs_ftp.c index 6b5dd6ddaae9..75455000ad1c 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_ftp.c +++ b/trunk/net/netfilter/ipvs/ip_vs_ftp.c @@ -157,7 +157,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, int ret = 0; enum ip_conntrack_info ctinfo; struct nf_conn *ct; - struct net *net; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, @@ -198,20 +197,18 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, */ { struct ip_vs_conn_param p; - ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, - iph->protocol, &from, port, - &cp->caddr, 0, &p); + ip_vs_conn_fill_param(AF_INET, iph->protocol, + &from, port, &cp->caddr, 0, &p); n_cp = ip_vs_conn_out_get(&p); } if (!n_cp) { struct ip_vs_conn_param p; - ip_vs_conn_fill_param(ip_vs_conn_net(cp), - AF_INET, IPPROTO_TCP, &cp->caddr, + ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP, &cp->caddr, 0, &cp->vaddr, port, &p); n_cp = ip_vs_conn_new(&p, &from, port, IP_VS_CONN_F_NO_CPORT | IP_VS_CONN_F_NFCT, - cp->dest, skb->mark); + cp->dest); if (!n_cp) return 0; @@ -260,9 +257,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, * would be adjusted twice. 
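The estimator arithmetic in the hunk above keeps each rate as a fixed-point exponentially weighted moving average: counters are sampled every 2 seconds, the delta is scaled by 2^10 (hence the <<9 for connections and packets, and <<4 with a 2^5 scale for bytes), and one quarter of the difference between sample and estimate is folded in per tick. A minimal userspace sketch of the same smoothing, with hypothetical names and no kernel dependencies:

#include <stdio.h>

/*
 * Fixed-point EWMA as in estimation_timer(): the estimate is kept scaled
 * by 2^10, sampled every 2 seconds, and each tick folds in 1/4 of the
 * difference between the new sample and the old estimate.
 */
struct rate_est {
	unsigned int last_count;	/* counter value at the previous tick */
	long scaled_rate;		/* rate << 10, in events per second */
};

/* Feed the cumulative counter at a 2-second tick; returns events/sec. */
static unsigned int rate_est_tick(struct rate_est *e, unsigned int count)
{
	/* (delta / 2 sec) << 10  ==  delta << 9 */
	long sample = (long)(count - e->last_count) << 9;

	e->last_count = count;
	e->scaled_rate += (sample - e->scaled_rate) >> 2;
	return (unsigned int)((e->scaled_rate + 0x1FF) >> 10);
}

int main(void)
{
	struct rate_est e = { 0, 0 };
	unsigned int total = 0, tick;

	for (tick = 1; tick <= 10; tick++) {
		total += 200;		/* pretend 200 new conns every 2 s */
		printf("tick %2u: ~%u conns/s\n", tick,
		       rate_est_tick(&e, total));
	}
	return 0;
}

After a few ticks the printed value settles near 100 conns/s, i.e. the true 200-per-2-seconds input, which is the behaviour the >>2 smoothing converges to.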
*/ - net = skb_net(skb); cp->app_data = NULL; - ip_vs_tcp_conn_listen(net, n_cp); + ip_vs_tcp_conn_listen(n_cp); ip_vs_conn_put(n_cp); return ret; } @@ -291,7 +287,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, union nf_inet_addr to; __be16 port; struct ip_vs_conn *n_cp; - struct net *net; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, @@ -363,15 +358,14 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, { struct ip_vs_conn_param p; - ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, - iph->protocol, &to, port, &cp->vaddr, - htons(ntohs(cp->vport)-1), &p); + ip_vs_conn_fill_param(AF_INET, iph->protocol, &to, port, + &cp->vaddr, htons(ntohs(cp->vport)-1), + &p); n_cp = ip_vs_conn_in_get(&p); if (!n_cp) { n_cp = ip_vs_conn_new(&p, &cp->daddr, htons(ntohs(cp->dport)-1), - IP_VS_CONN_F_NFCT, cp->dest, - skb->mark); + IP_VS_CONN_F_NFCT, cp->dest); if (!n_cp) return 0; @@ -383,8 +377,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, /* * Move tunnel to listen state */ - net = skb_net(skb); - ip_vs_tcp_conn_listen(net, n_cp); + ip_vs_tcp_conn_listen(n_cp); ip_vs_conn_put(n_cp); return 1; @@ -405,22 +398,23 @@ static struct ip_vs_app ip_vs_ftp = { .pkt_in = ip_vs_ftp_in, }; + /* - * per netns ip_vs_ftp initialization + * ip_vs_ftp initialization */ -static int __net_init __ip_vs_ftp_init(struct net *net) +static int __init ip_vs_ftp_init(void) { int i, ret; struct ip_vs_app *app = &ip_vs_ftp; - ret = register_ip_vs_app(net, app); + ret = register_ip_vs_app(app); if (ret) return ret; for (i=0; iprotocol, ports[i]); + ret = register_ip_vs_app_inc(app, app->protocol, ports[i]); if (ret) break; pr_info("%s: loaded support on port[%d] = %d\n", @@ -428,39 +422,18 @@ static int __net_init __ip_vs_ftp_init(struct net *net) } if (ret) - unregister_ip_vs_app(net, app); + unregister_ip_vs_app(app); return ret; } -/* - * netns exit - */ -static void __ip_vs_ftp_exit(struct net *net) -{ - struct ip_vs_app *app = &ip_vs_ftp; - - unregister_ip_vs_app(net, app); -} - -static struct pernet_operations ip_vs_ftp_ops = { - .init = __ip_vs_ftp_init, - .exit = __ip_vs_ftp_exit, -}; -int __init ip_vs_ftp_init(void) -{ - int rv; - - rv = register_pernet_subsys(&ip_vs_ftp_ops); - return rv; -} /* * ip_vs_ftp finish. */ static void __exit ip_vs_ftp_exit(void) { - unregister_pernet_subsys(&ip_vs_ftp_ops); + unregister_ip_vs_app(&ip_vs_ftp); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_lblc.c b/trunk/net/netfilter/ipvs/ip_vs_lblc.c index d5bec3371871..9323f8944199 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_lblc.c +++ b/trunk/net/netfilter/ipvs/ip_vs_lblc.c @@ -70,6 +70,7 @@ * entries that haven't been touched for a day. 
*/ #define COUNT_FOR_FULL_EXPIRATION 30 +static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ; /* @@ -116,7 +117,7 @@ struct ip_vs_lblc_table { static ctl_table vs_vars_table[] = { { .procname = "lblc_expiration", - .data = NULL, + .data = &sysctl_ip_vs_lblc_expiration, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, @@ -124,6 +125,8 @@ static ctl_table vs_vars_table[] = { { } }; +static struct ctl_table_header * sysctl_header; + static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) { list_del(&en->list); @@ -245,7 +248,6 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc) struct ip_vs_lblc_entry *en, *nxt; unsigned long now = jiffies; int i, j; - struct netns_ipvs *ipvs = net_ipvs(svc->net); for (i=0, j=tbl->rover; isched_lock); list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { if (time_before(now, - en->lastuse + - ipvs->sysctl_lblc_expiration)) + en->lastuse + sysctl_ip_vs_lblc_expiration)) continue; ip_vs_lblc_free(en); @@ -542,73 +543,23 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler = .schedule = ip_vs_lblc_schedule, }; -/* - * per netns init. - */ -static int __net_init __ip_vs_lblc_init(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - if (!net_eq(net, &init_net)) { - ipvs->lblc_ctl_table = kmemdup(vs_vars_table, - sizeof(vs_vars_table), - GFP_KERNEL); - if (ipvs->lblc_ctl_table == NULL) - goto err_dup; - } else - ipvs->lblc_ctl_table = vs_vars_table; - ipvs->sysctl_lblc_expiration = 24*60*60*HZ; - ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration; - - ipvs->lblc_ctl_header = - register_net_sysctl_table(net, net_vs_ctl_path, - ipvs->lblc_ctl_table); - if (!ipvs->lblc_ctl_header) - goto err_reg; - - return 0; - -err_reg: - if (!net_eq(net, &init_net)) - kfree(ipvs->lblc_ctl_table); - -err_dup: - return -ENOMEM; -} - -static void __net_exit __ip_vs_lblc_exit(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - unregister_net_sysctl_table(ipvs->lblc_ctl_header); - - if (!net_eq(net, &init_net)) - kfree(ipvs->lblc_ctl_table); -} - -static struct pernet_operations ip_vs_lblc_ops = { - .init = __ip_vs_lblc_init, - .exit = __ip_vs_lblc_exit, -}; static int __init ip_vs_lblc_init(void) { int ret; - ret = register_pernet_subsys(&ip_vs_lblc_ops); - if (ret) - return ret; - + sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table); ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler); if (ret) - unregister_pernet_subsys(&ip_vs_lblc_ops); + unregister_sysctl_table(sysctl_header); return ret; } + static void __exit ip_vs_lblc_cleanup(void) { + unregister_sysctl_table(sysctl_header); unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); - unregister_pernet_subsys(&ip_vs_lblc_ops); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_lblcr.c b/trunk/net/netfilter/ipvs/ip_vs_lblcr.c index 61ae8cfcf0b4..dbeed8ea421a 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/trunk/net/netfilter/ipvs/ip_vs_lblcr.c @@ -70,6 +70,8 @@ * entries that haven't been touched for a day. 
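The full_check loop above prunes any lblc entry whose lastuse is older than the (now global again) expiration sysctl, comparing jiffies timestamps with the wrap-safe helpers. A self-contained sketch of that idiom follows; time_before/time_after are re-derived locally for illustration and HZ is assumed to be 1000:

#include <stdio.h>

typedef unsigned long jiffies_t;

/*
 * Wrap-safe comparisons in the spirit of time_before()/time_after() from
 * <linux/jiffies.h>: the subtraction is evaluated as a signed quantity so
 * a counter wrap does not invert the result.
 */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)

/* Mirrors the expiry test in ip_vs_lblc_full_check(). */
static int entry_expired(jiffies_t now, jiffies_t lastuse, jiffies_t expiration)
{
	return !time_before(now, lastuse + expiration);
}

int main(void)
{
	jiffies_t hz = 1000;				/* assume HZ == 1000 */
	jiffies_t expiration = 24 * 60 * 60 * hz;	/* sysctl default: one day */
	jiffies_t lastuse = (jiffies_t)-5000;		/* just before a jiffies wrap */
	jiffies_t now = 3000;				/* just after the wrap */

	/* Only ~8 seconds really elapsed, so this must print 0 (not expired). */
	printf("expired: %d\n", entry_expired(now, lastuse, expiration));
	return 0;
}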
*/ #define COUNT_FOR_FULL_EXPIRATION 30 +static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ; + /* * for IPVS lblcr entry hash table @@ -294,7 +296,7 @@ struct ip_vs_lblcr_table { static ctl_table vs_vars_table[] = { { .procname = "lblcr_expiration", - .data = NULL, + .data = &sysctl_ip_vs_lblcr_expiration, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, @@ -302,6 +304,8 @@ static ctl_table vs_vars_table[] = { { } }; +static struct ctl_table_header * sysctl_header; + static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) { list_del(&en->list); @@ -421,15 +425,14 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc) unsigned long now = jiffies; int i, j; struct ip_vs_lblcr_entry *en, *nxt; - struct netns_ipvs *ipvs = net_ipvs(svc->net); for (i=0, j=tbl->rover; isched_lock); list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { - if (time_after(en->lastuse - + ipvs->sysctl_lblcr_expiration, now)) + if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, + now)) continue; ip_vs_lblcr_free(en); @@ -661,7 +664,6 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) read_lock(&svc->sched_lock); en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr); if (en) { - struct netns_ipvs *ipvs = net_ipvs(svc->net); /* We only hold a read lock, but this is atomic */ en->lastuse = jiffies; @@ -673,7 +675,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) /* More than one destination + enough time passed by, cleanup */ if (atomic_read(&en->set.size) > 1 && time_after(jiffies, en->set.lastmod + - ipvs->sysctl_lblcr_expiration)) { + sysctl_ip_vs_lblcr_expiration)) { struct ip_vs_dest *m; write_lock(&en->set.lock); @@ -742,73 +744,23 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler = .schedule = ip_vs_lblcr_schedule, }; -/* - * per netns init. 
- */ -static int __net_init __ip_vs_lblcr_init(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - if (!net_eq(net, &init_net)) { - ipvs->lblcr_ctl_table = kmemdup(vs_vars_table, - sizeof(vs_vars_table), - GFP_KERNEL); - if (ipvs->lblcr_ctl_table == NULL) - goto err_dup; - } else - ipvs->lblcr_ctl_table = vs_vars_table; - ipvs->sysctl_lblcr_expiration = 24*60*60*HZ; - ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration; - - ipvs->lblcr_ctl_header = - register_net_sysctl_table(net, net_vs_ctl_path, - ipvs->lblcr_ctl_table); - if (!ipvs->lblcr_ctl_header) - goto err_reg; - - return 0; - -err_reg: - if (!net_eq(net, &init_net)) - kfree(ipvs->lblcr_ctl_table); - -err_dup: - return -ENOMEM; -} - -static void __net_exit __ip_vs_lblcr_exit(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - unregister_net_sysctl_table(ipvs->lblcr_ctl_header); - - if (!net_eq(net, &init_net)) - kfree(ipvs->lblcr_ctl_table); -} - -static struct pernet_operations ip_vs_lblcr_ops = { - .init = __ip_vs_lblcr_init, - .exit = __ip_vs_lblcr_exit, -}; static int __init ip_vs_lblcr_init(void) { int ret; - ret = register_pernet_subsys(&ip_vs_lblcr_ops); - if (ret) - return ret; - + sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table); ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); if (ret) - unregister_pernet_subsys(&ip_vs_lblcr_ops); + unregister_sysctl_table(sysctl_header); return ret; } + static void __exit ip_vs_lblcr_cleanup(void) { + unregister_sysctl_table(sysctl_header); unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); - unregister_pernet_subsys(&ip_vs_lblcr_ops); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_nfct.c b/trunk/net/netfilter/ipvs/ip_vs_nfct.c index f454c80df0a7..4680647cd450 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_nfct.c +++ b/trunk/net/netfilter/ipvs/ip_vs_nfct.c @@ -141,7 +141,6 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct, struct nf_conntrack_tuple *orig, new_reply; struct ip_vs_conn *cp; struct ip_vs_conn_param p; - struct net *net = nf_ct_net(ct); if (exp->tuple.src.l3num != PF_INET) return; @@ -156,7 +155,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct, /* RS->CLIENT */ orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; - ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum, + ip_vs_conn_fill_param(exp->tuple.src.l3num, orig->dst.protonum, &orig->src.u3, orig->src.u.tcp.port, &orig->dst.u3, orig->dst.u.tcp.port, &p); cp = ip_vs_conn_out_get(&p); @@ -269,8 +268,7 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) " for conn " FMT_CONN "\n", __func__, ARG_TUPLE(&tuple), ARG_CONN(cp)); - h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE, - &tuple); + h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple); if (h) { ct = nf_ct_tuplehash_to_ctrack(h); /* Show what happens instead of calling nf_ct_kill() */ diff --git a/trunk/net/netfilter/ipvs/ip_vs_pe.c b/trunk/net/netfilter/ipvs/ip_vs_pe.c index 5cf859ccb31b..3414af70ee12 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_pe.c +++ b/trunk/net/netfilter/ipvs/ip_vs_pe.c @@ -29,11 +29,12 @@ void ip_vs_unbind_pe(struct ip_vs_service *svc) } /* Get pe in the pe list by name */ -struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name) +static struct ip_vs_pe * +ip_vs_pe_getbyname(const char *pe_name) { struct ip_vs_pe *pe; - IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__, + IP_VS_DBG(2, "%s(): pe_name \"%s\"\n", __func__, pe_name); spin_lock_bh(&ip_vs_pe_lock); @@ -59,22 +60,28 @@ struct ip_vs_pe 
*__ip_vs_pe_getbyname(const char *pe_name) } /* Lookup pe and try to load it if it doesn't exist */ -struct ip_vs_pe *ip_vs_pe_getbyname(const char *name) +struct ip_vs_pe *ip_vs_pe_get(const char *name) { struct ip_vs_pe *pe; /* Search for the pe by name */ - pe = __ip_vs_pe_getbyname(name); + pe = ip_vs_pe_getbyname(name); /* If pe not found, load the module and search again */ if (!pe) { request_module("ip_vs_pe_%s", name); - pe = __ip_vs_pe_getbyname(name); + pe = ip_vs_pe_getbyname(name); } return pe; } +void ip_vs_pe_put(struct ip_vs_pe *pe) +{ + if (pe && pe->module) + module_put(pe->module); +} + /* Register a pe in the pe list */ int register_ip_vs_pe(struct ip_vs_pe *pe) { diff --git a/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c b/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c index 0d83bc01fed4..b8b4e9620f3e 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c +++ b/trunk/net/netfilter/ipvs/ip_vs_pe_sip.c @@ -71,7 +71,6 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb) struct ip_vs_iphdr iph; unsigned int dataoff, datalen, matchoff, matchlen; const char *dptr; - int retc; ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph); @@ -84,8 +83,6 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb) if (dataoff >= skb->len) return -EINVAL; - if ((retc=skb_linearize(skb)) < 0) - return retc; dptr = skb->data + dataoff; datalen = skb->len - dataoff; diff --git a/trunk/net/netfilter/ipvs/ip_vs_proto.c b/trunk/net/netfilter/ipvs/ip_vs_proto.c index 17484a4416ef..c53998390877 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_proto.c +++ b/trunk/net/netfilter/ipvs/ip_vs_proto.c @@ -60,35 +60,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) return 0; } -#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \ - defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \ - defined(CONFIG_IP_VS_PROTO_ESP) -/* - * register an ipvs protocols netns related data - */ -static int -register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - unsigned hash = IP_VS_PROTO_HASH(pp->protocol); - struct ip_vs_proto_data *pd = - kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC); - - if (!pd) { - pr_err("%s(): no memory.\n", __func__); - return -ENOMEM; - } - pd->pp = pp; /* For speed issues */ - pd->next = ipvs->proto_data_table[hash]; - ipvs->proto_data_table[hash] = pd; - atomic_set(&pd->appcnt, 0); /* Init app counter */ - - if (pp->init_netns != NULL) - pp->init_netns(net, pd); - - return 0; -} -#endif /* * unregister an ipvs protocol @@ -111,29 +82,6 @@ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp) return -ESRCH; } -/* - * unregister an ipvs protocols netns data - */ -static int -unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_proto_data **pd_p; - unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol); - - pd_p = &ipvs->proto_data_table[hash]; - for (; *pd_p; pd_p = &(*pd_p)->next) { - if (*pd_p == pd) { - *pd_p = pd->next; - if (pd->pp->exit_netns != NULL) - pd->pp->exit_netns(net, pd); - kfree(pd); - return 0; - } - } - - return -ESRCH; -} /* * get ip_vs_protocol object by its proto. 
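ip_vs_pe_get(), as restored here, follows the usual look-up, auto-load, look-up-again pattern: search the registered persistence engines, call request_module() on a miss, then search exactly once more before giving up. A userspace sketch of that pattern, where pe_registry and load_plugin are hypothetical stand-ins for the kernel's pe list and request_module():

#include <stdio.h>
#include <string.h>

struct pe {
	const char *name;
	int loaded;
};

static struct pe pe_registry[] = {
	{ "sip", 0 },	/* becomes visible only after load_plugin("sip") */
};

static struct pe *pe_find(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(pe_registry) / sizeof(pe_registry[0]); i++)
		if (pe_registry[i].loaded && !strcmp(pe_registry[i].name, name))
			return &pe_registry[i];
	return NULL;
}

static void load_plugin(const char *name)
{
	size_t i;

	/* Pretend the module loader registered the persistence engine. */
	for (i = 0; i < sizeof(pe_registry) / sizeof(pe_registry[0]); i++)
		if (!strcmp(pe_registry[i].name, name))
			pe_registry[i].loaded = 1;
}

/* Look up, try to load on a miss, then look up once more. */
static struct pe *pe_get(const char *name)
{
	struct pe *pe = pe_find(name);

	if (!pe) {
		load_plugin(name);
		pe = pe_find(name);
	}
	return pe;
}

int main(void)
{
	printf("sip -> %s\n", pe_get("sip") ? "found" : "missing");
	return 0;
}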
@@ -152,44 +100,19 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) } EXPORT_SYMBOL(ip_vs_proto_get); -/* - * get ip_vs_protocol object data by netns and proto - */ -struct ip_vs_proto_data * -__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) -{ - struct ip_vs_proto_data *pd; - unsigned hash = IP_VS_PROTO_HASH(proto); - - for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { - if (pd->pp->protocol == proto) - return pd; - } - - return NULL; -} - -struct ip_vs_proto_data * -ip_vs_proto_data_get(struct net *net, unsigned short proto) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - return __ipvs_proto_data_get(ipvs, proto); -} -EXPORT_SYMBOL(ip_vs_proto_data_get); /* * Propagate event for state change to all protocols */ -void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags) +void ip_vs_protocol_timeout_change(int flags) { - struct ip_vs_proto_data *pd; + struct ip_vs_protocol *pp; int i; for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { - for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) { - if (pd->pp->timeout_change) - pd->pp->timeout_change(pd, flags); + for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) { + if (pp->timeout_change) + pp->timeout_change(pp, flags); } } } @@ -313,46 +236,6 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg); } -/* - * per network name-space init - */ -static int __net_init __ip_vs_protocol_init(struct net *net) -{ -#ifdef CONFIG_IP_VS_PROTO_TCP - register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); -#endif -#ifdef CONFIG_IP_VS_PROTO_UDP - register_ip_vs_proto_netns(net, &ip_vs_protocol_udp); -#endif -#ifdef CONFIG_IP_VS_PROTO_SCTP - register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp); -#endif -#ifdef CONFIG_IP_VS_PROTO_AH - register_ip_vs_proto_netns(net, &ip_vs_protocol_ah); -#endif -#ifdef CONFIG_IP_VS_PROTO_ESP - register_ip_vs_proto_netns(net, &ip_vs_protocol_esp); -#endif - return 0; -} - -static void __net_exit __ip_vs_protocol_cleanup(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_proto_data *pd; - int i; - - /* unregister all the ipvs proto data for this netns */ - for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { - while ((pd = ipvs->proto_data_table[i]) != NULL) - unregister_ip_vs_proto_netns(net, pd); - } -} - -static struct pernet_operations ipvs_proto_ops = { - .init = __ip_vs_protocol_init, - .exit = __ip_vs_protocol_cleanup, -}; int __init ip_vs_protocol_init(void) { @@ -382,7 +265,6 @@ int __init ip_vs_protocol_init(void) REGISTER_PROTOCOL(&ip_vs_protocol_esp); #endif pr_info("Registered protocols (%s)\n", &protocols[2]); - return register_pernet_subsys(&ipvs_proto_ops); return 0; } @@ -393,7 +275,6 @@ void ip_vs_protocol_cleanup(void) struct ip_vs_protocol *pp; int i; - unregister_pernet_subsys(&ipvs_proto_ops); /* unregister all the ipvs protocols */ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { while ((pp = ip_vs_proto_table[i]) != NULL) diff --git a/trunk/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/trunk/net/netfilter/ipvs/ip_vs_proto_ah_esp.c index 5b8eb8b12c3e..3a0461117d3f 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_proto_ah_esp.c +++ b/trunk/net/netfilter/ipvs/ip_vs_proto_ah_esp.c @@ -41,30 +41,28 @@ struct isakmp_hdr { #define PORT_ISAKMP 500 static void -ah_esp_conn_fill_param_proto(struct net *net, int af, - const struct ip_vs_iphdr *iph, int inverse, - struct ip_vs_conn_param *p) +ah_esp_conn_fill_param_proto(int af, const struct ip_vs_iphdr *iph, + int inverse, struct 
ip_vs_conn_param *p) { if (likely(!inverse)) - ip_vs_conn_fill_param(net, af, IPPROTO_UDP, + ip_vs_conn_fill_param(af, IPPROTO_UDP, &iph->saddr, htons(PORT_ISAKMP), &iph->daddr, htons(PORT_ISAKMP), p); else - ip_vs_conn_fill_param(net, af, IPPROTO_UDP, + ip_vs_conn_fill_param(af, IPPROTO_UDP, &iph->daddr, htons(PORT_ISAKMP), &iph->saddr, htons(PORT_ISAKMP), p); } static struct ip_vs_conn * -ah_esp_conn_in_get(int af, const struct sk_buff *skb, +ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse) { struct ip_vs_conn *cp; struct ip_vs_conn_param p; - struct net *net = skb_net(skb); - ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p); + ah_esp_conn_fill_param_proto(af, iph, inverse, &p); cp = ip_vs_conn_in_get(&p); if (!cp) { /* @@ -74,7 +72,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet " "%s%s %s->%s\n", inverse ? "ICMP+" : "", - ip_vs_proto_get(iph->protocol)->name, + pp->name, IP_VS_DBG_ADDR(af, &iph->saddr), IP_VS_DBG_ADDR(af, &iph->daddr)); } @@ -85,21 +83,21 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, static struct ip_vs_conn * ah_esp_conn_out_get(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse) { struct ip_vs_conn *cp; struct ip_vs_conn_param p; - struct net *net = skb_net(skb); - ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p); + ah_esp_conn_fill_param_proto(af, iph, inverse, &p); cp = ip_vs_conn_out_get(&p); if (!cp) { IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet " "%s%s %s->%s\n", inverse ? "ICMP+" : "", - ip_vs_proto_get(iph->protocol)->name, + pp->name, IP_VS_DBG_ADDR(af, &iph->saddr), IP_VS_DBG_ADDR(af, &iph->daddr)); } @@ -109,7 +107,7 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb, static int -ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, +ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) { /* @@ -119,14 +117,26 @@ ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, return 0; } +static void ah_esp_init(struct ip_vs_protocol *pp) +{ + /* nothing to do now */ +} + + +static void ah_esp_exit(struct ip_vs_protocol *pp) +{ + /* nothing to do now */ +} + + #ifdef CONFIG_IP_VS_PROTO_AH struct ip_vs_protocol ip_vs_protocol_ah = { .name = "AH", .protocol = IPPROTO_AH, .num_states = 1, .dont_defrag = 1, - .init = NULL, - .exit = NULL, + .init = ah_esp_init, + .exit = ah_esp_exit, .conn_schedule = ah_esp_conn_schedule, .conn_in_get = ah_esp_conn_in_get, .conn_out_get = ah_esp_conn_out_get, @@ -139,6 +149,7 @@ struct ip_vs_protocol ip_vs_protocol_ah = { .app_conn_bind = NULL, .debug_packet = ip_vs_tcpudp_debug_packet, .timeout_change = NULL, /* ISAKMP */ + .set_state_timeout = NULL, }; #endif @@ -148,8 +159,8 @@ struct ip_vs_protocol ip_vs_protocol_esp = { .protocol = IPPROTO_ESP, .num_states = 1, .dont_defrag = 1, - .init = NULL, - .exit = NULL, + .init = ah_esp_init, + .exit = ah_esp_exit, .conn_schedule = ah_esp_conn_schedule, .conn_in_get = ah_esp_conn_in_get, .conn_out_get = ah_esp_conn_out_get, diff --git a/trunk/net/netfilter/ipvs/ip_vs_proto_sctp.c b/trunk/net/netfilter/ipvs/ip_vs_proto_sctp.c index fb2d04ac5d4e..1ea96bcd342b 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/trunk/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -9,10 +9,9 @@ #include static 
int -sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, +sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) { - struct net *net; struct ip_vs_service *svc; sctp_chunkhdr_t _schunkh, *sch; sctp_sctphdr_t *sh, _sctph; @@ -28,13 +27,13 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, sizeof(_schunkh), &_schunkh); if (sch == NULL) return 0; - net = skb_net(skb); + if ((sch->type == SCTP_CID_INIT) && - (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, + (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr, sh->dest))) { int ignored; - if (ip_vs_todrop(net_ipvs(net))) { + if (ip_vs_todrop()) { /* * It seems that we are very loaded. * We have to drop this packet :( @@ -47,19 +46,14 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, * Let the virtual server select a real server for the * incoming connection, and create a connection entry. */ - *cpp = ip_vs_schedule(svc, skb, pd, &ignored); - if (!*cpp && ignored <= 0) { - if (!ignored) - *verdict = ip_vs_leave(svc, skb, pd); - else { - ip_vs_service_put(svc); - *verdict = NF_DROP; - } + *cpp = ip_vs_schedule(svc, skb, pp, &ignored); + if (!*cpp && !ignored) { + *verdict = ip_vs_leave(svc, skb, pp); return 0; } ip_vs_service_put(svc); } - /* NF_ACCEPT */ + return 1; } @@ -862,7 +856,7 @@ static struct ipvs_sctp_nextstate /* * Timeout table[state] */ -static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = { +static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = { [IP_VS_SCTP_S_NONE] = 2 * HZ, [IP_VS_SCTP_S_INIT_CLI] = 1 * 60 * HZ, [IP_VS_SCTP_S_INIT_SER] = 1 * 60 * HZ, @@ -906,8 +900,20 @@ static const char *sctp_state_name(int state) return "?"; } +static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags) +{ +} + +static int +sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to) +{ + +return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST, + sctp_state_name_table, sname, to); +} + static inline int -set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, +set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int direction, const struct sk_buff *skb) { sctp_chunkhdr_t _sctpch, *sch; @@ -965,7 +971,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, IP_VS_DBG_BUF(8, "%s %s %s:%d->" "%s:%d state: %s->%s conn->refcnt:%d\n", - pd->pp->name, + pp->name, ((direction == IP_VS_DIR_OUTPUT) ? "output " : "input "), IP_VS_DBG_ADDR(cp->af, &cp->daddr), @@ -989,73 +995,75 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, } } } - if (likely(pd)) - cp->timeout = pd->timeout_table[cp->state = next_state]; - else /* What to do ? 
*/ - cp->timeout = sctp_timeouts[cp->state = next_state]; - return 1; + cp->timeout = pp->timeout_table[cp->state = next_state]; + + return 1; } static int sctp_state_transition(struct ip_vs_conn *cp, int direction, - const struct sk_buff *skb, struct ip_vs_proto_data *pd) + const struct sk_buff *skb, struct ip_vs_protocol *pp) { int ret = 0; spin_lock(&cp->lock); - ret = set_sctp_state(pd, cp, direction, skb); + ret = set_sctp_state(pp, cp, direction, skb); spin_unlock(&cp->lock); return ret; } +/* + * Hash table for SCTP application incarnations + */ +#define SCTP_APP_TAB_BITS 4 +#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS) +#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1) + +static struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; +static DEFINE_SPINLOCK(sctp_app_lock); + static inline __u16 sctp_app_hashkey(__be16 port) { return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port) & SCTP_APP_TAB_MASK; } -static int sctp_register_app(struct net *net, struct ip_vs_app *inc) +static int sctp_register_app(struct ip_vs_app *inc) { struct ip_vs_app *i; __u16 hash; __be16 port = inc->port; int ret = 0; - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP); hash = sctp_app_hashkey(port); - spin_lock_bh(&ipvs->sctp_app_lock); - list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) { + spin_lock_bh(&sctp_app_lock); + list_for_each_entry(i, &sctp_apps[hash], p_list) { if (i->port == port) { ret = -EEXIST; goto out; } } - list_add(&inc->p_list, &ipvs->sctp_apps[hash]); - atomic_inc(&pd->appcnt); + list_add(&inc->p_list, &sctp_apps[hash]); + atomic_inc(&ip_vs_protocol_sctp.appcnt); out: - spin_unlock_bh(&ipvs->sctp_app_lock); + spin_unlock_bh(&sctp_app_lock); return ret; } -static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc) +static void sctp_unregister_app(struct ip_vs_app *inc) { - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP); - - spin_lock_bh(&ipvs->sctp_app_lock); - atomic_dec(&pd->appcnt); + spin_lock_bh(&sctp_app_lock); + atomic_dec(&ip_vs_protocol_sctp.appcnt); list_del(&inc->p_list); - spin_unlock_bh(&ipvs->sctp_app_lock); + spin_unlock_bh(&sctp_app_lock); } static int sctp_app_conn_bind(struct ip_vs_conn *cp) { - struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); int hash; struct ip_vs_app *inc; int result = 0; @@ -1066,12 +1074,12 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp) /* Lookup application incarnations and bind the right one */ hash = sctp_app_hashkey(cp->vport); - spin_lock(&ipvs->sctp_app_lock); - list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) { + spin_lock(&sctp_app_lock); + list_for_each_entry(inc, &sctp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; - spin_unlock(&ipvs->sctp_app_lock); + spin_unlock(&sctp_app_lock); IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" "%s:%u to app %s on port %u\n", @@ -1087,50 +1095,43 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp) goto out; } } - spin_unlock(&ipvs->sctp_app_lock); + spin_unlock(&sctp_app_lock); out: return result; } -/* --------------------------------------------- - * timeouts is netns related now. 
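The application-incarnation tables brought back in these protocol hunks (sctp_apps here, tcp_apps and udp_apps in the following files) are small hash tables of 2^4 buckets, keyed by XOR-ing the 16-bit port with itself shifted right by the table-bits count and masking to the table size. A standalone sketch of that bucket selection; the kernel hashes the raw __be16 value, so the example simply feeds the network-order bit pattern for port 21:

#include <stdio.h>
#include <stdint.h>

/*
 * Bucket selection used by sctp/tcp/udp_app_hashkey(): XOR the 16-bit
 * port with itself shifted right by APP_TAB_BITS and keep the low bits.
 */
#define APP_TAB_BITS	4
#define APP_TAB_SIZE	(1 << APP_TAB_BITS)
#define APP_TAB_MASK	(APP_TAB_SIZE - 1)

static unsigned int app_hashkey(uint16_t port)
{
	return ((port >> APP_TAB_BITS) ^ port) & APP_TAB_MASK;
}

int main(void)
{
	/*
	 * On the wire port 21 is the byte sequence 00 15; the actual bucket
	 * in the kernel depends on how the __be16 is stored, so 0x0015 here
	 * is purely illustrative.
	 */
	printf("port 21 -> bucket %u of %d\n",
	       app_hashkey(0x0015), APP_TAB_SIZE);
	return 0;
}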
- * --------------------------------------------- - */ -static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) +static void ip_vs_sctp_init(struct ip_vs_protocol *pp) { - struct netns_ipvs *ipvs = net_ipvs(net); - - ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE); - spin_lock_init(&ipvs->tcp_app_lock); - pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, - sizeof(sctp_timeouts)); + IP_VS_INIT_HASH_TABLE(sctp_apps); + pp->timeout_table = sctp_timeouts; } -static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd) + +static void ip_vs_sctp_exit(struct ip_vs_protocol *pp) { - kfree(pd->timeout_table); + } struct ip_vs_protocol ip_vs_protocol_sctp = { - .name = "SCTP", - .protocol = IPPROTO_SCTP, - .num_states = IP_VS_SCTP_S_LAST, - .dont_defrag = 0, - .init = NULL, - .exit = NULL, - .init_netns = __ip_vs_sctp_init, - .exit_netns = __ip_vs_sctp_exit, - .register_app = sctp_register_app, + .name = "SCTP", + .protocol = IPPROTO_SCTP, + .num_states = IP_VS_SCTP_S_LAST, + .dont_defrag = 0, + .appcnt = ATOMIC_INIT(0), + .init = ip_vs_sctp_init, + .exit = ip_vs_sctp_exit, + .register_app = sctp_register_app, .unregister_app = sctp_unregister_app, - .conn_schedule = sctp_conn_schedule, - .conn_in_get = ip_vs_conn_in_get_proto, - .conn_out_get = ip_vs_conn_out_get_proto, - .snat_handler = sctp_snat_handler, - .dnat_handler = sctp_dnat_handler, - .csum_check = sctp_csum_check, - .state_name = sctp_state_name, + .conn_schedule = sctp_conn_schedule, + .conn_in_get = ip_vs_conn_in_get_proto, + .conn_out_get = ip_vs_conn_out_get_proto, + .snat_handler = sctp_snat_handler, + .dnat_handler = sctp_dnat_handler, + .csum_check = sctp_csum_check, + .state_name = sctp_state_name, .state_transition = sctp_state_transition, - .app_conn_bind = sctp_app_conn_bind, - .debug_packet = ip_vs_tcpudp_debug_packet, - .timeout_change = NULL, + .app_conn_bind = sctp_app_conn_bind, + .debug_packet = ip_vs_tcpudp_debug_packet, + .timeout_change = sctp_timeout_change, + .set_state_timeout = sctp_set_state_timeout, }; diff --git a/trunk/net/netfilter/ipvs/ip_vs_proto_tcp.c b/trunk/net/netfilter/ipvs/ip_vs_proto_tcp.c index c0cc341b840d..f6c5200e2146 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/trunk/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -9,12 +9,8 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * Changes: Hans Schillstrom + * Changes: * - * Network name space (netns) aware. 
- * Global data moved to netns i.e struct netns_ipvs - * tcp_timeouts table has copy per netns in a hash table per - * protocol ip_vs_proto_data and is handled by netns */ #define KMSG_COMPONENT "IPVS" @@ -32,10 +28,9 @@ #include static int -tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, +tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) { - struct net *net; struct ip_vs_service *svc; struct tcphdr _tcph, *th; struct ip_vs_iphdr iph; @@ -47,14 +42,14 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, *verdict = NF_DROP; return 0; } - net = skb_net(skb); + /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ if (th->syn && - (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, - &iph.daddr, th->dest))) { + (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr, + th->dest))) { int ignored; - if (ip_vs_todrop(net_ipvs(net))) { + if (ip_vs_todrop()) { /* * It seems that we are very loaded. * We have to drop this packet :( @@ -68,19 +63,13 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, * Let the virtual server select a real server for the * incoming connection, and create a connection entry. */ - *cpp = ip_vs_schedule(svc, skb, pd, &ignored); - if (!*cpp && ignored <= 0) { - if (!ignored) - *verdict = ip_vs_leave(svc, skb, pd); - else { - ip_vs_service_put(svc); - *verdict = NF_DROP; - } + *cpp = ip_vs_schedule(svc, skb, pp, &ignored); + if (!*cpp && !ignored) { + *verdict = ip_vs_leave(svc, skb, pp); return 0; } ip_vs_service_put(svc); } - /* NF_ACCEPT */ return 1; } @@ -349,7 +338,7 @@ static const int tcp_state_off[IP_VS_DIR_LAST] = { /* * Timeout table[state] */ -static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = { +static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = { [IP_VS_TCP_S_NONE] = 2*HZ, [IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ, [IP_VS_TCP_S_SYN_SENT] = 2*60*HZ, @@ -448,7 +437,10 @@ static struct tcp_states_t tcp_states_dos [] = { /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, }; -static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags) +static struct tcp_states_t *tcp_state_table = tcp_states; + + +static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags) { int on = (flags & 1); /* secure_tcp */ @@ -458,7 +450,14 @@ static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags) ** for most if not for all of the applications. Something ** like "capabilities" (flags) for each object. */ - pd->tcp_state_table = (on ? tcp_states_dos : tcp_states); + tcp_state_table = (on? 
tcp_states_dos : tcp_states); +} + +static int +tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to) +{ + return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST, + tcp_state_name_table, sname, to); } static inline int tcp_state_idx(struct tcphdr *th) @@ -475,7 +474,7 @@ static inline int tcp_state_idx(struct tcphdr *th) } static inline void -set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, +set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int direction, struct tcphdr *th) { int state_idx; @@ -498,8 +497,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, goto tcp_state_out; } - new_state = - pd->tcp_state_table[state_off+state_idx].next_state[cp->state]; + new_state = tcp_state_table[state_off+state_idx].next_state[cp->state]; tcp_state_out: if (new_state != cp->state) { @@ -507,7 +505,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->" "%s:%d state: %s->%s conn->refcnt:%d\n", - pd->pp->name, + pp->name, ((state_off == TCP_DIR_OUTPUT) ? "output " : "input "), th->syn ? 'S' : '.', @@ -537,19 +535,17 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, } } - if (likely(pd)) - cp->timeout = pd->timeout_table[cp->state = new_state]; - else /* What to do ? */ - cp->timeout = tcp_timeouts[cp->state = new_state]; + cp->timeout = pp->timeout_table[cp->state = new_state]; } + /* * Handle state transitions */ static int tcp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, - struct ip_vs_proto_data *pd) + struct ip_vs_protocol *pp) { struct tcphdr _tcph, *th; @@ -564,12 +560,23 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction, return 0; spin_lock(&cp->lock); - set_tcp_state(pd, cp, direction, th); + set_tcp_state(pp, cp, direction, th); spin_unlock(&cp->lock); return 1; } + +/* + * Hash table for TCP application incarnations + */ +#define TCP_APP_TAB_BITS 4 +#define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS) +#define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1) + +static struct list_head tcp_apps[TCP_APP_TAB_SIZE]; +static DEFINE_SPINLOCK(tcp_app_lock); + static inline __u16 tcp_app_hashkey(__be16 port) { return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port) @@ -577,50 +584,44 @@ static inline __u16 tcp_app_hashkey(__be16 port) } -static int tcp_register_app(struct net *net, struct ip_vs_app *inc) +static int tcp_register_app(struct ip_vs_app *inc) { struct ip_vs_app *i; __u16 hash; __be16 port = inc->port; int ret = 0; - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); hash = tcp_app_hashkey(port); - spin_lock_bh(&ipvs->tcp_app_lock); - list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) { + spin_lock_bh(&tcp_app_lock); + list_for_each_entry(i, &tcp_apps[hash], p_list) { if (i->port == port) { ret = -EEXIST; goto out; } } - list_add(&inc->p_list, &ipvs->tcp_apps[hash]); - atomic_inc(&pd->appcnt); + list_add(&inc->p_list, &tcp_apps[hash]); + atomic_inc(&ip_vs_protocol_tcp.appcnt); out: - spin_unlock_bh(&ipvs->tcp_app_lock); + spin_unlock_bh(&tcp_app_lock); return ret; } static void -tcp_unregister_app(struct net *net, struct ip_vs_app *inc) +tcp_unregister_app(struct ip_vs_app *inc) { - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); - - spin_lock_bh(&ipvs->tcp_app_lock); - atomic_dec(&pd->appcnt); + spin_lock_bh(&tcp_app_lock); + 
atomic_dec(&ip_vs_protocol_tcp.appcnt); list_del(&inc->p_list); - spin_unlock_bh(&ipvs->tcp_app_lock); + spin_unlock_bh(&tcp_app_lock); } static int tcp_app_conn_bind(struct ip_vs_conn *cp) { - struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); int hash; struct ip_vs_app *inc; int result = 0; @@ -632,12 +633,12 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) /* Lookup application incarnations and bind the right one */ hash = tcp_app_hashkey(cp->vport); - spin_lock(&ipvs->tcp_app_lock); - list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) { + spin_lock(&tcp_app_lock); + list_for_each_entry(inc, &tcp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; - spin_unlock(&ipvs->tcp_app_lock); + spin_unlock(&tcp_app_lock); IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" "%s:%u to app %s on port %u\n", @@ -654,7 +655,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) goto out; } } - spin_unlock(&ipvs->tcp_app_lock); + spin_unlock(&tcp_app_lock); out: return result; @@ -664,35 +665,24 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) /* * Set LISTEN timeout. (ip_vs_conn_put will setup timer) */ -void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp) +void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp) { - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); - spin_lock(&cp->lock); cp->state = IP_VS_TCP_S_LISTEN; - cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN] - : tcp_timeouts[IP_VS_TCP_S_LISTEN]); + cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN]; spin_unlock(&cp->lock); } -/* --------------------------------------------- - * timeouts is netns related now. - * --------------------------------------------- - */ -static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE); - spin_lock_init(&ipvs->tcp_app_lock); - pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, - sizeof(tcp_timeouts)); - pd->tcp_state_table = tcp_states; +static void ip_vs_tcp_init(struct ip_vs_protocol *pp) +{ + IP_VS_INIT_HASH_TABLE(tcp_apps); + pp->timeout_table = tcp_timeouts; } -static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) + +static void ip_vs_tcp_exit(struct ip_vs_protocol *pp) { - kfree(pd->timeout_table); } @@ -701,10 +691,9 @@ struct ip_vs_protocol ip_vs_protocol_tcp = { .protocol = IPPROTO_TCP, .num_states = IP_VS_TCP_S_LAST, .dont_defrag = 0, - .init = NULL, - .exit = NULL, - .init_netns = __ip_vs_tcp_init, - .exit_netns = __ip_vs_tcp_exit, + .appcnt = ATOMIC_INIT(0), + .init = ip_vs_tcp_init, + .exit = ip_vs_tcp_exit, .register_app = tcp_register_app, .unregister_app = tcp_unregister_app, .conn_schedule = tcp_conn_schedule, @@ -718,4 +707,5 @@ struct ip_vs_protocol ip_vs_protocol_tcp = { .app_conn_bind = tcp_app_conn_bind, .debug_packet = ip_vs_tcpudp_debug_packet, .timeout_change = tcp_timeout_change, + .set_state_timeout = tcp_set_state_timeout, }; diff --git a/trunk/net/netfilter/ipvs/ip_vs_proto_udp.c b/trunk/net/netfilter/ipvs/ip_vs_proto_udp.c index f1282cbe6fe3..9d106a06bb0a 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/trunk/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -9,8 +9,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * Changes: Hans Schillstrom - * Network name space (netns) aware. 
+ * Changes: * */ @@ -29,10 +28,9 @@ #include static int -udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, +udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) { - struct net *net; struct ip_vs_service *svc; struct udphdr _udph, *uh; struct ip_vs_iphdr iph; @@ -44,13 +42,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, *verdict = NF_DROP; return 0; } - net = skb_net(skb); - svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, + + svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr, uh->dest); if (svc) { int ignored; - if (ip_vs_todrop(net_ipvs(net))) { + if (ip_vs_todrop()) { /* * It seems that we are very loaded. * We have to drop this packet :( @@ -64,19 +62,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, * Let the virtual server select a real server for the * incoming connection, and create a connection entry. */ - *cpp = ip_vs_schedule(svc, skb, pd, &ignored); - if (!*cpp && ignored <= 0) { - if (!ignored) - *verdict = ip_vs_leave(svc, skb, pd); - else { - ip_vs_service_put(svc); - *verdict = NF_DROP; - } + *cpp = ip_vs_schedule(svc, skb, pp, &ignored); + if (!*cpp && !ignored) { + *verdict = ip_vs_leave(svc, skb, pp); return 0; } ip_vs_service_put(svc); } - /* NF_ACCEPT */ return 1; } @@ -346,6 +338,19 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) return 1; } + +/* + * Note: the caller guarantees that only one of register_app, + * unregister_app or app_conn_bind is called each time. + */ + +#define UDP_APP_TAB_BITS 4 +#define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS) +#define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1) + +static struct list_head udp_apps[UDP_APP_TAB_SIZE]; +static DEFINE_SPINLOCK(udp_app_lock); + static inline __u16 udp_app_hashkey(__be16 port) { return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port) @@ -353,50 +358,44 @@ static inline __u16 udp_app_hashkey(__be16 port) } -static int udp_register_app(struct net *net, struct ip_vs_app *inc) +static int udp_register_app(struct ip_vs_app *inc) { struct ip_vs_app *i; __u16 hash; __be16 port = inc->port; int ret = 0; - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP); hash = udp_app_hashkey(port); - spin_lock_bh(&ipvs->udp_app_lock); - list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) { + spin_lock_bh(&udp_app_lock); + list_for_each_entry(i, &udp_apps[hash], p_list) { if (i->port == port) { ret = -EEXIST; goto out; } } - list_add(&inc->p_list, &ipvs->udp_apps[hash]); - atomic_inc(&pd->appcnt); + list_add(&inc->p_list, &udp_apps[hash]); + atomic_inc(&ip_vs_protocol_udp.appcnt); out: - spin_unlock_bh(&ipvs->udp_app_lock); + spin_unlock_bh(&udp_app_lock); return ret; } static void -udp_unregister_app(struct net *net, struct ip_vs_app *inc) +udp_unregister_app(struct ip_vs_app *inc) { - struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP); - struct netns_ipvs *ipvs = net_ipvs(net); - - spin_lock_bh(&ipvs->udp_app_lock); - atomic_dec(&pd->appcnt); + spin_lock_bh(&udp_app_lock); + atomic_dec(&ip_vs_protocol_udp.appcnt); list_del(&inc->p_list); - spin_unlock_bh(&ipvs->udp_app_lock); + spin_unlock_bh(&udp_app_lock); } static int udp_app_conn_bind(struct ip_vs_conn *cp) { - struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); int hash; struct ip_vs_app *inc; int result = 0; @@ -408,12 +407,12 @@ static int udp_app_conn_bind(struct ip_vs_conn 
*cp) /* Lookup application incarnations and bind the right one */ hash = udp_app_hashkey(cp->vport); - spin_lock(&ipvs->udp_app_lock); - list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) { + spin_lock(&udp_app_lock); + list_for_each_entry(inc, &udp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; - spin_unlock(&ipvs->udp_app_lock); + spin_unlock(&udp_app_lock); IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" "%s:%u to app %s on port %u\n", @@ -430,14 +429,14 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp) goto out; } } - spin_unlock(&ipvs->udp_app_lock); + spin_unlock(&udp_app_lock); out: return result; } -static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = { +static int udp_timeouts[IP_VS_UDP_S_LAST+1] = { [IP_VS_UDP_S_NORMAL] = 5*60*HZ, [IP_VS_UDP_S_LAST] = 2*HZ, }; @@ -447,6 +446,14 @@ static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = { [IP_VS_UDP_S_LAST] = "BUG!", }; + +static int +udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to) +{ + return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST, + udp_state_name_table, sname, to); +} + static const char * udp_state_name(int state) { if (state >= IP_VS_UDP_S_LAST) @@ -457,30 +464,20 @@ static const char * udp_state_name(int state) static int udp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, - struct ip_vs_proto_data *pd) + struct ip_vs_protocol *pp) { - if (unlikely(!pd)) { - pr_err("UDP no ns data\n"); - return 0; - } - - cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; + cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL]; return 1; } -static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) +static void udp_init(struct ip_vs_protocol *pp) { - struct netns_ipvs *ipvs = net_ipvs(net); - - ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE); - spin_lock_init(&ipvs->udp_app_lock); - pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, - sizeof(udp_timeouts)); + IP_VS_INIT_HASH_TABLE(udp_apps); + pp->timeout_table = udp_timeouts; } -static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd) +static void udp_exit(struct ip_vs_protocol *pp) { - kfree(pd->timeout_table); } @@ -489,10 +486,8 @@ struct ip_vs_protocol ip_vs_protocol_udp = { .protocol = IPPROTO_UDP, .num_states = IP_VS_UDP_S_LAST, .dont_defrag = 0, - .init = NULL, - .exit = NULL, - .init_netns = __udp_init, - .exit_netns = __udp_exit, + .init = udp_init, + .exit = udp_exit, .conn_schedule = udp_conn_schedule, .conn_in_get = ip_vs_conn_in_get_proto, .conn_out_get = ip_vs_conn_out_get_proto, @@ -506,4 +501,5 @@ struct ip_vs_protocol ip_vs_protocol_udp = { .app_conn_bind = udp_app_conn_bind, .debug_packet = ip_vs_tcpudp_debug_packet, .timeout_change = NULL, + .set_state_timeout = udp_set_state_timeout, }; diff --git a/trunk/net/netfilter/ipvs/ip_vs_sync.c b/trunk/net/netfilter/ipvs/ip_vs_sync.c index d5a6e640ea45..ab85aedea17e 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_sync.c +++ b/trunk/net/netfilter/ipvs/ip_vs_sync.c @@ -5,18 +5,6 @@ * high-performance and highly available server based on a * cluster of servers. * - * Version 1, is capable of handling both version 0 and 1 messages. - * Version 0 is the plain old format. - * Note Version 0 receivers will just drop Ver 1 messages. - * Version 1 is capable of handle IPv6, Persistence data, - * time-outs, and firewall marks. - * In ver.1 "ip_vs_sync_conn_options" will be sent in netw. order. - * Ver. 
0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0 - * - * Definitions Message: is a complete datagram - * Sync_conn: is a part of a Message - * Param Data is an option to a Sync_conn. - * * Authors: Wensong Zhang * * ip_vs_sync: sync connection info from master load balancer to backups @@ -27,8 +15,6 @@ * Alexandre Cassen : Added SyncID support for incoming sync * messages filtering. * Justin Ossevoort : Fix endian problem on sync message size. - * Hans Schillstrom : Added Version 1: i.e. IPv6, - * Persistence support, fwmark and time-out. */ #define KMSG_COMPONENT "IPVS" @@ -49,8 +35,6 @@ #include #include -#include /* Used for ntoh_seq and hton_seq */ - #include #include @@ -59,13 +43,11 @@ #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */ #define IP_VS_SYNC_PORT 8848 /* multicast port */ -#define SYNC_PROTO_VER 1 /* Protocol version in header */ /* * IPVS sync connection entry - * Version 0, i.e. original version. */ -struct ip_vs_sync_conn_v0 { +struct ip_vs_sync_conn { __u8 reserved; /* Protocol, addresses and port numbers */ @@ -89,159 +71,41 @@ struct ip_vs_sync_conn_options { struct ip_vs_seq out_seq; /* outgoing seq. struct */ }; -/* - Sync Connection format (sync_conn) - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Type | Protocol | Ver. | Size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Flags | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | State | cport | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | vport | dport | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | fwmark | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | timeout (in sec.) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | ... | - | IP-Addresses (v4 or v6) | - | ... | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - Optional Parameters. - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Param. Type | Param. Length | Param. data | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | - | ... | - | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | | Param Type | Param. Length | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Param data | - | Last Param data should be padded for 32 bit alignment | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -*/ - -/* - * Type 0, IPv4 sync connection format - */ -struct ip_vs_sync_v4 { - __u8 type; - __u8 protocol; /* Which protocol (TCP/UDP) */ - __be16 ver_size; /* Version msb 4 bits */ - /* Flags and state transition */ - __be32 flags; /* status flags */ - __be16 state; /* state info */ - /* Protocol, addresses and port numbers */ - __be16 cport; - __be16 vport; - __be16 dport; - __be32 fwmark; /* Firewall mark from skb */ - __be32 timeout; /* cp timeout */ - __be32 caddr; /* client address */ - __be32 vaddr; /* virtual address */ - __be32 daddr; /* destination address */ - /* The sequence options start here */ - /* PE data padded to 32bit alignment after seq. 
options */ -}; -/* - * Type 2 messages IPv6 - */ -struct ip_vs_sync_v6 { - __u8 type; - __u8 protocol; /* Which protocol (TCP/UDP) */ - __be16 ver_size; /* Version msb 4 bits */ - /* Flags and state transition */ - __be32 flags; /* status flags */ - __be16 state; /* state info */ - /* Protocol, addresses and port numbers */ - __be16 cport; - __be16 vport; - __be16 dport; - __be32 fwmark; /* Firewall mark from skb */ - __be32 timeout; /* cp timeout */ - struct in6_addr caddr; /* client address */ - struct in6_addr vaddr; /* virtual address */ - struct in6_addr daddr; /* destination address */ - /* The sequence options start here */ - /* PE data padded to 32bit alignment after seq. options */ -}; - -union ip_vs_sync_conn { - struct ip_vs_sync_v4 v4; - struct ip_vs_sync_v6 v6; -}; - -/* Bits in Type field in above */ -#define STYPE_INET6 0 -#define STYPE_F_INET6 (1 << STYPE_INET6) - -#define SVER_SHIFT 12 /* Shift to get version */ -#define SVER_MASK 0x0fff /* Mask to strip version */ - -#define IPVS_OPT_SEQ_DATA 1 -#define IPVS_OPT_PE_DATA 2 -#define IPVS_OPT_PE_NAME 3 -#define IPVS_OPT_PARAM 7 - -#define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1)) -#define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1)) -#define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1)) -#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) - struct ip_vs_sync_thread_data { - struct net *net; struct socket *sock; char *buf; }; -/* Version 0 definition of packet sizes */ -#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0)) +#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) #define FULL_CONN_SIZE \ -(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options)) +(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options)) /* - The master mulitcasts messages (Datagrams) to the backup load balancers - in the following format. - - Version 1: - Note, first byte should be Zero, so ver 0 receivers will drop the packet. + The master mulitcasts messages to the backup load balancers in the + following format. 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 0 | SyncID | Size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Count Conns | Version | Reserved, set to Zero | + | Count Conns | SyncID | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (1) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | . | - ~ . ~ + | . | | . 
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (n) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - Version 0 Header - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Count Conns | SyncID | Size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | IPVS Sync Connection (1) | */ #define SYNC_MESG_HEADER_LEN 4 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */ -/* Version 0 header */ -struct ip_vs_sync_mesg_v0 { +struct ip_vs_sync_mesg { __u8 nr_conns; __u8 syncid; __u16 size; @@ -249,16 +113,9 @@ struct ip_vs_sync_mesg_v0 { /* ip_vs_sync_conn entries start here */ }; -/* Version 1 header */ -struct ip_vs_sync_mesg { - __u8 reserved; /* must be zero */ - __u8 syncid; - __u16 size; - __u8 nr_conns; - __s8 version; /* SYNC_PROTO_VER */ - __u16 spare; - /* ip_vs_sync_conn entries start here */ -}; +/* the maximum length of sync (sending/receiving) message */ +static int sync_send_mesg_maxlen; +static int sync_recv_mesg_maxlen; struct ip_vs_sync_buff { struct list_head list; @@ -270,6 +127,28 @@ struct ip_vs_sync_buff { unsigned char *end; }; + +/* the sync_buff list head and the lock */ +static LIST_HEAD(ip_vs_sync_queue); +static DEFINE_SPINLOCK(ip_vs_sync_lock); + +/* current sync_buff for accepting new conn entries */ +static struct ip_vs_sync_buff *curr_sb = NULL; +static DEFINE_SPINLOCK(curr_sb_lock); + +/* ipvs sync daemon state */ +volatile int ip_vs_sync_state = IP_VS_STATE_NONE; +volatile int ip_vs_master_syncid = 0; +volatile int ip_vs_backup_syncid = 0; + +/* multicast interface name */ +char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; +char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; + +/* sync daemon tasks */ +static struct task_struct *sync_master_thread; +static struct task_struct *sync_backup_thread; + /* multicast addr */ static struct sockaddr_in mcast_addr = { .sin_family = AF_INET, @@ -277,71 +156,41 @@ static struct sockaddr_in mcast_addr = { .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), }; -/* - * Copy of struct ip_vs_seq - * From unaligned network order to aligned host order - */ -static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) -{ - ho->init_seq = get_unaligned_be32(&no->init_seq); - ho->delta = get_unaligned_be32(&no->delta); - ho->previous_delta = get_unaligned_be32(&no->previous_delta); -} - -/* - * Copy of struct ip_vs_seq - * From Aligned host order to unaligned network order - */ -static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no) -{ - put_unaligned_be32(ho->init_seq, &no->init_seq); - put_unaligned_be32(ho->delta, &no->delta); - put_unaligned_be32(ho->previous_delta, &no->previous_delta); -} -static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs) +static inline struct ip_vs_sync_buff *sb_dequeue(void) { struct ip_vs_sync_buff *sb; - spin_lock_bh(&ipvs->sync_lock); - if (list_empty(&ipvs->sync_queue)) { + spin_lock_bh(&ip_vs_sync_lock); + if (list_empty(&ip_vs_sync_queue)) { sb = NULL; } else { - sb = list_entry(ipvs->sync_queue.next, + sb = list_entry(ip_vs_sync_queue.next, struct ip_vs_sync_buff, list); list_del(&sb->list); } - spin_unlock_bh(&ipvs->sync_lock); + spin_unlock_bh(&ip_vs_sync_lock); return sb; } -/* - * Create a new sync buffer for Version 1 proto. 
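A minimal, standalone sketch (not part of this patch) of the 4-byte version-0 sync header restored above; the field names mirror struct ip_vs_sync_mesg in the hunk, and the size check only restates the SYNC_MESG_HEADER_LEN define quoted from this file.

	#include <stdint.h>
	#include <assert.h>

	struct v0_sync_header {
		uint8_t  nr_conns;	/* number of ip_vs_sync_conn entries that follow */
		uint8_t  syncid;	/* daemon instance id, used for filtering on the backup */
		uint16_t size;		/* total datagram length; network byte order on the wire */
	};

	int main(void)
	{
		/* matches "#define SYNC_MESG_HEADER_LEN 4" restored above */
		assert(sizeof(struct v0_sync_header) == 4);
		return 0;
	}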
- */ -static inline struct ip_vs_sync_buff * -ip_vs_sync_buff_create(struct netns_ipvs *ipvs) +static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void) { struct ip_vs_sync_buff *sb; if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) return NULL; - sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); - if (!sb->mesg) { + if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) { kfree(sb); return NULL; } - sb->mesg->reserved = 0; /* old nr_conns i.e. must be zeo now */ - sb->mesg->version = SYNC_PROTO_VER; - sb->mesg->syncid = ipvs->master_syncid; - sb->mesg->size = sizeof(struct ip_vs_sync_mesg); sb->mesg->nr_conns = 0; - sb->mesg->spare = 0; - sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg); - sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen; - + sb->mesg->syncid = ip_vs_master_syncid; + sb->mesg->size = 4; + sb->head = (unsigned char *)sb->mesg + 4; + sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen; sb->firstuse = jiffies; return sb; } @@ -352,16 +201,14 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) kfree(sb); } -static inline void sb_queue_tail(struct netns_ipvs *ipvs) +static inline void sb_queue_tail(struct ip_vs_sync_buff *sb) { - struct ip_vs_sync_buff *sb = ipvs->sync_buff; - - spin_lock(&ipvs->sync_lock); - if (ipvs->sync_state & IP_VS_STATE_MASTER) - list_add_tail(&sb->list, &ipvs->sync_queue); + spin_lock(&ip_vs_sync_lock); + if (ip_vs_sync_state & IP_VS_STATE_MASTER) + list_add_tail(&sb->list, &ip_vs_sync_queue); else ip_vs_sync_buff_release(sb); - spin_unlock(&ipvs->sync_lock); + spin_unlock(&ip_vs_sync_lock); } /* @@ -369,101 +216,36 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs) * than the specified time or the specified time is zero. */ static inline struct ip_vs_sync_buff * -get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time) +get_curr_sync_buff(unsigned long time) { struct ip_vs_sync_buff *sb; - spin_lock_bh(&ipvs->sync_buff_lock); - if (ipvs->sync_buff && (time == 0 || - time_before(jiffies - ipvs->sync_buff->firstuse, time))) { - sb = ipvs->sync_buff; - ipvs->sync_buff = NULL; + spin_lock_bh(&curr_sb_lock); + if (curr_sb && (time == 0 || + time_before(jiffies - curr_sb->firstuse, time))) { + sb = curr_sb; + curr_sb = NULL; } else sb = NULL; - spin_unlock_bh(&ipvs->sync_buff_lock); + spin_unlock_bh(&curr_sb_lock); return sb; } -/* - * Switch mode from sending version 0 or 1 - * - must handle sync_buf - */ -void ip_vs_sync_switch_mode(struct net *net, int mode) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - if (!ipvs->sync_state & IP_VS_STATE_MASTER) - return; - if (mode == ipvs->sysctl_sync_ver || !ipvs->sync_buff) - return; - - spin_lock_bh(&ipvs->sync_buff_lock); - /* Buffer empty ? then let buf_create do the job */ - if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) { - kfree(ipvs->sync_buff); - ipvs->sync_buff = NULL; - } else { - spin_lock_bh(&ipvs->sync_lock); - if (ipvs->sync_state & IP_VS_STATE_MASTER) - list_add_tail(&ipvs->sync_buff->list, - &ipvs->sync_queue); - else - ip_vs_sync_buff_release(ipvs->sync_buff); - spin_unlock_bh(&ipvs->sync_lock); - } - spin_unlock_bh(&ipvs->sync_buff_lock); -} /* - * Create a new sync buffer for Version 0 proto. 
- */ -static inline struct ip_vs_sync_buff * -ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) -{ - struct ip_vs_sync_buff *sb; - struct ip_vs_sync_mesg_v0 *mesg; - - if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) - return NULL; - - sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); - if (!sb->mesg) { - kfree(sb); - return NULL; - } - mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg; - mesg->nr_conns = 0; - mesg->syncid = ipvs->master_syncid; - mesg->size = sizeof(struct ip_vs_sync_mesg_v0); - sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0); - sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen; - sb->firstuse = jiffies; - return sb; -} - -/* - * Version 0 , could be switched in by sys_ctl. * Add an ip_vs_conn information into the current sync_buff. + * Called by ip_vs_in. */ -void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) +void ip_vs_sync_conn(struct ip_vs_conn *cp) { - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_sync_mesg_v0 *m; - struct ip_vs_sync_conn_v0 *s; + struct ip_vs_sync_mesg *m; + struct ip_vs_sync_conn *s; int len; - if (unlikely(cp->af != AF_INET)) - return; - /* Do not sync ONE PACKET */ - if (cp->flags & IP_VS_CONN_F_ONE_PACKET) - return; - - spin_lock(&ipvs->sync_buff_lock); - if (!ipvs->sync_buff) { - ipvs->sync_buff = - ip_vs_sync_buff_create_v0(ipvs); - if (!ipvs->sync_buff) { - spin_unlock(&ipvs->sync_buff_lock); + spin_lock(&curr_sb_lock); + if (!curr_sb) { + if (!(curr_sb=ip_vs_sync_buff_create())) { + spin_unlock(&curr_sb_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } @@ -471,11 +253,10 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE : SIMPLE_CONN_SIZE; - m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg; - s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head; + m = curr_sb->mesg; + s = (struct ip_vs_sync_conn *)curr_sb->head; /* copy members */ - s->reserved = 0; s->protocol = cp->protocol; s->cport = cp->cport; s->vport = cp->vport; @@ -493,366 +274,83 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) m->nr_conns++; m->size += len; - ipvs->sync_buff->head += len; + curr_sb->head += len; /* check if there is a space for next one */ - if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) { - sb_queue_tail(ipvs); - ipvs->sync_buff = NULL; + if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) { + sb_queue_tail(curr_sb); + curr_sb = NULL; } - spin_unlock(&ipvs->sync_buff_lock); + spin_unlock(&curr_sb_lock); /* synchronize its controller if it has */ if (cp->control) - ip_vs_sync_conn(net, cp->control); -} - -/* - * Add an ip_vs_conn information into the current sync_buff. - * Called by ip_vs_in. 
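A hedged, userspace-only sketch of the buffer-fill pattern used by the restored ip_vs_sync_conn() above: append one record at "head", then hand the buffer off once another full-size record might no longer fit. The names sync_buf and append_entry are illustrative, not kernel symbols.

	#include <stddef.h>
	#include <string.h>

	struct sync_buf {
		unsigned char *base;	/* start of the datagram; the 4-byte header lives here */
		unsigned char *head;	/* next free byte */
		unsigned char *end;	/* base + maximum datagram length */
	};

	/* Copy one record and report whether the buffer should now be queued for
	 * transmission, mirroring the "head + FULL_CONN_SIZE > end" test above. */
	int append_entry(struct sync_buf *b, const void *entry,
			 size_t entry_size, size_t max_entry_size)
	{
		memcpy(b->head, entry, entry_size);
		b->head += entry_size;
		return b->head + max_entry_size > b->end;
	}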
- * Sending Version 1 messages - */ -void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_sync_mesg *m; - union ip_vs_sync_conn *s; - __u8 *p; - unsigned int len, pe_name_len, pad; - - /* Handle old version of the protocol */ - if (ipvs->sysctl_sync_ver == 0) { - ip_vs_sync_conn_v0(net, cp); - return; - } - /* Do not sync ONE PACKET */ - if (cp->flags & IP_VS_CONN_F_ONE_PACKET) - goto control; -sloop: - /* Sanity checks */ - pe_name_len = 0; - if (cp->pe_data_len) { - if (!cp->pe_data || !cp->dest) { - IP_VS_ERR_RL("SYNC, connection pe_data invalid\n"); - return; - } - pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); - } - - spin_lock(&ipvs->sync_buff_lock); - -#ifdef CONFIG_IP_VS_IPV6 - if (cp->af == AF_INET6) - len = sizeof(struct ip_vs_sync_v6); - else -#endif - len = sizeof(struct ip_vs_sync_v4); - - if (cp->flags & IP_VS_CONN_F_SEQ_MASK) - len += sizeof(struct ip_vs_sync_conn_options) + 2; - - if (cp->pe_data_len) - len += cp->pe_data_len + 2; /* + Param hdr field */ - if (pe_name_len) - len += pe_name_len + 2; - - /* check if there is a space for this one */ - pad = 0; - if (ipvs->sync_buff) { - pad = (4 - (size_t)ipvs->sync_buff->head) & 3; - if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) { - sb_queue_tail(ipvs); - ipvs->sync_buff = NULL; - pad = 0; - } - } - - if (!ipvs->sync_buff) { - ipvs->sync_buff = ip_vs_sync_buff_create(ipvs); - if (!ipvs->sync_buff) { - spin_unlock(&ipvs->sync_buff_lock); - pr_err("ip_vs_sync_buff_create failed.\n"); - return; - } - } - - m = ipvs->sync_buff->mesg; - p = ipvs->sync_buff->head; - ipvs->sync_buff->head += pad + len; - m->size += pad + len; - /* Add ev. padding from prev. sync_conn */ - while (pad--) - *(p++) = 0; - - s = (union ip_vs_sync_conn *)p; - - /* Set message type & copy members */ - s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0); - s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */ - s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED); - s->v4.state = htons(cp->state); - s->v4.protocol = cp->protocol; - s->v4.cport = cp->cport; - s->v4.vport = cp->vport; - s->v4.dport = cp->dport; - s->v4.fwmark = htonl(cp->fwmark); - s->v4.timeout = htonl(cp->timeout / HZ); - m->nr_conns++; - -#ifdef CONFIG_IP_VS_IPV6 - if (cp->af == AF_INET6) { - p += sizeof(struct ip_vs_sync_v6); - ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6); - ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6); - ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6); - } else -#endif - { - p += sizeof(struct ip_vs_sync_v4); /* options ptr */ - s->v4.caddr = cp->caddr.ip; - s->v4.vaddr = cp->vaddr.ip; - s->v4.daddr = cp->daddr.ip; - } - if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { - *(p++) = IPVS_OPT_SEQ_DATA; - *(p++) = sizeof(struct ip_vs_sync_conn_options); - hton_seq((struct ip_vs_seq *)p, &cp->in_seq); - p += sizeof(struct ip_vs_seq); - hton_seq((struct ip_vs_seq *)p, &cp->out_seq); - p += sizeof(struct ip_vs_seq); - } - /* Handle pe data */ - if (cp->pe_data_len && cp->pe_data) { - *(p++) = IPVS_OPT_PE_DATA; - *(p++) = cp->pe_data_len; - memcpy(p, cp->pe_data, cp->pe_data_len); - p += cp->pe_data_len; - if (pe_name_len) { - /* Add PE_NAME */ - *(p++) = IPVS_OPT_PE_NAME; - *(p++) = pe_name_len; - memcpy(p, cp->pe->name, pe_name_len); - p += pe_name_len; - } - } - - spin_unlock(&ipvs->sync_buff_lock); - -control: - /* synchronize its controller if it has */ - cp = cp->control; - if (!cp) - return; - /* - * Reduce sync rate for templates - * i.e only increment in_pkts for Templates. 
- */ - if (cp->flags & IP_VS_CONN_F_TEMPLATE) { - int pkts = atomic_add_return(1, &cp->in_pkts); - - if (pkts % ipvs->sysctl_sync_threshold[1] != 1) - return; - } - goto sloop; + ip_vs_sync_conn(cp->control); } -/* - * fill_param used by version 1 - */ static inline int -ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc, - struct ip_vs_conn_param *p, - __u8 *pe_data, unsigned int pe_data_len, - __u8 *pe_name, unsigned int pe_name_len) +ip_vs_conn_fill_param_sync(int af, int protocol, + const union nf_inet_addr *caddr, __be16 cport, + const union nf_inet_addr *vaddr, __be16 vport, + struct ip_vs_conn_param *p) { -#ifdef CONFIG_IP_VS_IPV6 - if (af == AF_INET6) - ip_vs_conn_fill_param(net, af, sc->v6.protocol, - (const union nf_inet_addr *)&sc->v6.caddr, - sc->v6.cport, - (const union nf_inet_addr *)&sc->v6.vaddr, - sc->v6.vport, p); - else -#endif - ip_vs_conn_fill_param(net, af, sc->v4.protocol, - (const union nf_inet_addr *)&sc->v4.caddr, - sc->v4.cport, - (const union nf_inet_addr *)&sc->v4.vaddr, - sc->v4.vport, p); - /* Handle pe data */ - if (pe_data_len) { - if (pe_name_len) { - char buff[IP_VS_PENAME_MAXLEN+1]; - - memcpy(buff, pe_name, pe_name_len); - buff[pe_name_len]=0; - p->pe = __ip_vs_pe_getbyname(buff); - if (!p->pe) { - IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n", - buff); - return 1; - } - } else { - IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n"); - return 1; - } - - p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC); - if (!p->pe_data) { - if (p->pe->module) - module_put(p->pe->module); - return -ENOMEM; - } - memcpy(p->pe_data, pe_data, pe_data_len); - p->pe_data_len = pe_data_len; - } + /* XXX: Need to take into account persistence engine */ + ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, p); return 0; } /* - * Connection Add / Update. - * Common for version 0 and 1 reception of backup sync_conns. - * Param: ... - * timeout is in sec. + * Process received multicast message and create the corresponding + * ip_vs_conn entries. */ -static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, - unsigned int flags, unsigned int state, - unsigned int protocol, unsigned int type, - const union nf_inet_addr *daddr, __be16 dport, - unsigned long timeout, __u32 fwmark, - struct ip_vs_sync_conn_options *opt) +static void ip_vs_process_message(const char *buffer, const size_t buflen) { - struct ip_vs_dest *dest; + struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer; + struct ip_vs_sync_conn *s; + struct ip_vs_sync_conn_options *opt; struct ip_vs_conn *cp; - struct netns_ipvs *ipvs = net_ipvs(net); - - if (!(flags & IP_VS_CONN_F_TEMPLATE)) - cp = ip_vs_conn_in_get(param); - else - cp = ip_vs_ct_in_get(param); - - if (cp && param->pe_data) /* Free pe_data */ - kfree(param->pe_data); - if (!cp) { - /* - * Find the appropriate destination for the connection. - * If it is not found the connection will remain unbound - * but still handled. 
- */ - dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, - param->vport, protocol, fwmark); + struct ip_vs_protocol *pp; + struct ip_vs_dest *dest; + struct ip_vs_conn_param param; + char *p; + int i; - /* Set the approprite ativity flag */ - if (protocol == IPPROTO_TCP) { - if (state != IP_VS_TCP_S_ESTABLISHED) - flags |= IP_VS_CONN_F_INACTIVE; - else - flags &= ~IP_VS_CONN_F_INACTIVE; - } else if (protocol == IPPROTO_SCTP) { - if (state != IP_VS_SCTP_S_ESTABLISHED) - flags |= IP_VS_CONN_F_INACTIVE; - else - flags &= ~IP_VS_CONN_F_INACTIVE; - } - cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); - if (dest) - atomic_dec(&dest->refcnt); - if (!cp) { - if (param->pe_data) - kfree(param->pe_data); - IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); - return; - } - } else if (!cp->dest) { - dest = ip_vs_try_bind_dest(cp); - if (dest) - atomic_dec(&dest->refcnt); - } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) && - (cp->state != state)) { - /* update active/inactive flag for the connection */ - dest = cp->dest; - if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && - (state != IP_VS_TCP_S_ESTABLISHED)) { - atomic_dec(&dest->activeconns); - atomic_inc(&dest->inactconns); - cp->flags |= IP_VS_CONN_F_INACTIVE; - } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && - (state == IP_VS_TCP_S_ESTABLISHED)) { - atomic_inc(&dest->activeconns); - atomic_dec(&dest->inactconns); - cp->flags &= ~IP_VS_CONN_F_INACTIVE; - } - } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) && - (cp->state != state)) { - dest = cp->dest; - if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && - (state != IP_VS_SCTP_S_ESTABLISHED)) { - atomic_dec(&dest->activeconns); - atomic_inc(&dest->inactconns); - cp->flags &= ~IP_VS_CONN_F_INACTIVE; - } + if (buflen < sizeof(struct ip_vs_sync_mesg)) { + IP_VS_ERR_RL("sync message header too short\n"); + return; } - if (opt) - memcpy(&cp->in_seq, opt, sizeof(*opt)); - atomic_set(&cp->in_pkts, ipvs->sysctl_sync_threshold[0]); - cp->state = state; - cp->old_state = cp->state; - /* - * For Ver 0 messages style - * - Not possible to recover the right timeout for templates - * - can not find the right fwmark - * virtual service. If needed, we can do it for - * non-fwmark persistent services. - * Ver 1 messages style. - * - No problem. 
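A small illustration of the flag handling visible in the connection-update code above (and in the corresponding restored receive path further below): a synced connection only counts as active when its protocol state is ESTABLISHED, otherwise IP_VS_CONN_F_INACTIVE is set. The constant values below are placeholders, not the kernel definitions.

	/* Placeholder values standing in for the kernel enums/flags. */
	#define F_INACTIVE		0x0100u
	#define TCP_S_ESTABLISHED	1

	unsigned int fixup_sync_flags(unsigned int flags, int is_tcp, int state)
	{
		if (is_tcp) {
			if (state != TCP_S_ESTABLISHED)
				flags |= F_INACTIVE;	/* count it against inactconns */
			else
				flags &= ~F_INACTIVE;	/* count it against activeconns */
		}
		return flags;
	}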
- */ - if (timeout) { - if (timeout > MAX_SCHEDULE_TIMEOUT / HZ) - timeout = MAX_SCHEDULE_TIMEOUT / HZ; - cp->timeout = timeout*HZ; - } else { - struct ip_vs_proto_data *pd; + /* Convert size back to host byte order */ + m->size = ntohs(m->size); - pd = ip_vs_proto_data_get(net, protocol); - if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table) - cp->timeout = pd->timeout_table[state]; - else - cp->timeout = (3*60*HZ); + if (buflen != m->size) { + IP_VS_ERR_RL("bogus sync message size\n"); + return; } - ip_vs_conn_put(cp); -} -/* - * Process received multicast message for Version 0 - */ -static void ip_vs_process_message_v0(struct net *net, const char *buffer, - const size_t buflen) -{ - struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer; - struct ip_vs_sync_conn_v0 *s; - struct ip_vs_sync_conn_options *opt; - struct ip_vs_protocol *pp; - struct ip_vs_conn_param param; - char *p; - int i; + /* SyncID sanity check */ + if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) { + IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n", + m->syncid); + return; + } - p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); + p = (char *)buffer + sizeof(struct ip_vs_sync_mesg); for (i=0; inr_conns; i++) { unsigned flags, state; if (p + SIMPLE_CONN_SIZE > buffer+buflen) { - IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); + IP_VS_ERR_RL("bogus conn in sync message\n"); return; } - s = (struct ip_vs_sync_conn_v0 *) p; + s = (struct ip_vs_sync_conn *) p; flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC; flags &= ~IP_VS_CONN_F_HASHED; if (flags & IP_VS_CONN_F_SEQ_MASK) { opt = (struct ip_vs_sync_conn_options *)&s[1]; p += FULL_CONN_SIZE; if (p > buffer+buflen) { - IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n"); + IP_VS_ERR_RL("bogus conn options in sync message\n"); return; } } else { @@ -864,286 +362,118 @@ static void ip_vs_process_message_v0(struct net *net, const char *buffer, if (!(flags & IP_VS_CONN_F_TEMPLATE)) { pp = ip_vs_proto_get(s->protocol); if (!pp) { - IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n", + IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n", s->protocol); continue; } if (state >= pp->num_states) { - IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n", + IP_VS_DBG(2, "Invalid %s state %u in sync msg\n", pp->name, state); continue; } } else { /* protocol in templates is not used for state/timeout */ + pp = NULL; if (state > 0) { - IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n", + IP_VS_DBG(2, "Invalid template state %u in sync msg\n", state); state = 0; } } - ip_vs_conn_fill_param(net, AF_INET, s->protocol, - (const union nf_inet_addr *)&s->caddr, - s->cport, - (const union nf_inet_addr *)&s->vaddr, - s->vport, ¶m); - - /* Send timeout as Zero */ - ip_vs_proc_conn(net, ¶m, flags, state, s->protocol, AF_INET, - (union nf_inet_addr *)&s->daddr, s->dport, - 0, 0, opt); - } -} - -/* - * Handle options - */ -static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen, - __u32 *opt_flags, - struct ip_vs_sync_conn_options *opt) -{ - struct ip_vs_sync_conn_options *topt; - - topt = (struct ip_vs_sync_conn_options *)p; - - if (plen != sizeof(struct ip_vs_sync_conn_options)) { - IP_VS_DBG(2, "BACKUP, bogus conn options length\n"); - return -EINVAL; - } - if (*opt_flags & IPVS_OPT_F_SEQ_DATA) { - IP_VS_DBG(2, "BACKUP, conn options found twice\n"); - return -EINVAL; - } - ntoh_seq(&topt->in_seq, &opt->in_seq); - ntoh_seq(&topt->out_seq, &opt->out_seq); - *opt_flags |= IPVS_OPT_F_SEQ_DATA; - return 0; -} - -static int 
ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len, - __u8 **data, unsigned int maxlen, - __u32 *opt_flags, __u32 flag) -{ - if (plen > maxlen) { - IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen); - return -EINVAL; - } - if (*opt_flags & flag) { - IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag); - return -EINVAL; - } - *data_len = plen; - *data = p; - *opt_flags |= flag; - return 0; -} -/* - * Process a Version 1 sync. connection - */ -static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) -{ - struct ip_vs_sync_conn_options opt; - union ip_vs_sync_conn *s; - struct ip_vs_protocol *pp; - struct ip_vs_conn_param param; - __u32 flags; - unsigned int af, state, pe_data_len=0, pe_name_len=0; - __u8 *pe_data=NULL, *pe_name=NULL; - __u32 opt_flags=0; - int retc=0; - - s = (union ip_vs_sync_conn *) p; - - if (s->v6.type & STYPE_F_INET6) { -#ifdef CONFIG_IP_VS_IPV6 - af = AF_INET6; - p += sizeof(struct ip_vs_sync_v6); -#else - IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n"); - retc = 10; - goto out; -#endif - } else if (!s->v4.type) { - af = AF_INET; - p += sizeof(struct ip_vs_sync_v4); - } else { - return -10; - } - if (p > msg_end) - return -20; - - /* Process optional params check Type & Len. */ - while (p < msg_end) { - int ptype; - int plen; - - if (p+2 > msg_end) - return -30; - ptype = *(p++); - plen = *(p++); - - if (!plen || ((p + plen) > msg_end)) - return -40; - /* Handle seq option p = param data */ - switch (ptype & ~IPVS_OPT_F_PARAM) { - case IPVS_OPT_SEQ_DATA: - if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt)) - return -50; - break; - - case IPVS_OPT_PE_DATA: - if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data, - IP_VS_PEDATA_MAXLEN, &opt_flags, - IPVS_OPT_F_PE_DATA)) - return -60; - break; - - case IPVS_OPT_PE_NAME: - if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name, - IP_VS_PENAME_MAXLEN, &opt_flags, - IPVS_OPT_F_PE_NAME)) - return -70; - break; - - default: - /* Param data mandatory ? 
*/ - if (!(ptype & IPVS_OPT_F_PARAM)) { - IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n", - ptype & ~IPVS_OPT_F_PARAM); - retc = 20; - goto out; + { + if (ip_vs_conn_fill_param_sync(AF_INET, s->protocol, + (union nf_inet_addr *)&s->caddr, + s->cport, + (union nf_inet_addr *)&s->vaddr, + s->vport, ¶m)) { + pr_err("ip_vs_conn_fill_param_sync failed"); + return; } + if (!(flags & IP_VS_CONN_F_TEMPLATE)) + cp = ip_vs_conn_in_get(¶m); + else + cp = ip_vs_ct_in_get(¶m); } - p += plen; /* Next option */ - } - - /* Get flags and Mask off unsupported */ - flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK; - flags |= IP_VS_CONN_F_SYNC; - state = ntohs(s->v4.state); - - if (!(flags & IP_VS_CONN_F_TEMPLATE)) { - pp = ip_vs_proto_get(s->v4.protocol); - if (!pp) { - IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n", - s->v4.protocol); - retc = 30; - goto out; - } - if (state >= pp->num_states) { - IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n", - pp->name, state); - retc = 40; - goto out; - } - } else { - /* protocol in templates is not used for state/timeout */ - if (state > 0) { - IP_VS_DBG(3, "BACKUP, Invalid template state %u\n", - state); - state = 0; - } - } - if (ip_vs_conn_fill_param_sync(net, af, s, ¶m, pe_data, - pe_data_len, pe_name, pe_name_len)) { - retc = 50; - goto out; - } - /* If only IPv4, just silent skip IPv6 */ - if (af == AF_INET) - ip_vs_proc_conn(net, ¶m, flags, state, s->v4.protocol, af, - (union nf_inet_addr *)&s->v4.daddr, s->v4.dport, - ntohl(s->v4.timeout), ntohl(s->v4.fwmark), - (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) - ); -#ifdef CONFIG_IP_VS_IPV6 - else - ip_vs_proc_conn(net, ¶m, flags, state, s->v6.protocol, af, - (union nf_inet_addr *)&s->v6.daddr, s->v6.dport, - ntohl(s->v6.timeout), ntohl(s->v6.fwmark), - (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) - ); -#endif - return 0; - /* Error exit */ -out: - IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc); - return retc; - -} -/* - * Process received multicast message and create the corresponding - * ip_vs_conn entries. - * Handles Version 0 & 1 - */ -static void ip_vs_process_message(struct net *net, __u8 *buffer, - const size_t buflen) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer; - __u8 *p, *msg_end; - int i, nr_conns; - - if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) { - IP_VS_DBG(2, "BACKUP, message header too short\n"); - return; - } - /* Convert size back to host byte order */ - m2->size = ntohs(m2->size); - - if (buflen != m2->size) { - IP_VS_DBG(2, "BACKUP, bogus message size\n"); - return; - } - /* SyncID sanity check */ - if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) { - IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid); - return; - } - /* Handle version 1 message */ - if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0) - && (m2->spare == 0)) { - - msg_end = buffer + sizeof(struct ip_vs_sync_mesg); - nr_conns = m2->nr_conns; - - for (i=0; iv4) > buffer+buflen) { - IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n"); - return; + if (!cp) { + /* + * Find the appropriate destination for the connection. + * If it is not found the connection will remain unbound + * but still handled. 
+ */ + dest = ip_vs_find_dest(AF_INET, + (union nf_inet_addr *)&s->daddr, + s->dport, + (union nf_inet_addr *)&s->vaddr, + s->vport, + s->protocol); + /* Set the approprite ativity flag */ + if (s->protocol == IPPROTO_TCP) { + if (state != IP_VS_TCP_S_ESTABLISHED) + flags |= IP_VS_CONN_F_INACTIVE; + else + flags &= ~IP_VS_CONN_F_INACTIVE; + } else if (s->protocol == IPPROTO_SCTP) { + if (state != IP_VS_SCTP_S_ESTABLISHED) + flags |= IP_VS_CONN_F_INACTIVE; + else + flags &= ~IP_VS_CONN_F_INACTIVE; } - s = (union ip_vs_sync_conn *)p; - size = ntohs(s->v4.ver_size) & SVER_MASK; - msg_end = p + size; - /* Basic sanity checks */ - if (msg_end > buffer+buflen) { - IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n"); + cp = ip_vs_conn_new(¶m, + (union nf_inet_addr *)&s->daddr, + s->dport, flags, dest); + if (dest) + atomic_dec(&dest->refcnt); + if (!cp) { + pr_err("ip_vs_conn_new failed\n"); return; } - if (ntohs(s->v4.ver_size) >> SVER_SHIFT) { - IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n", - ntohs(s->v4.ver_size) >> SVER_SHIFT); - return; + } else if (!cp->dest) { + dest = ip_vs_try_bind_dest(cp); + if (dest) + atomic_dec(&dest->refcnt); + } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) && + (cp->state != state)) { + /* update active/inactive flag for the connection */ + dest = cp->dest; + if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && + (state != IP_VS_TCP_S_ESTABLISHED)) { + atomic_dec(&dest->activeconns); + atomic_inc(&dest->inactconns); + cp->flags |= IP_VS_CONN_F_INACTIVE; + } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && + (state == IP_VS_TCP_S_ESTABLISHED)) { + atomic_inc(&dest->activeconns); + atomic_dec(&dest->inactconns); + cp->flags &= ~IP_VS_CONN_F_INACTIVE; } - /* Process a single sync_conn */ - retc = ip_vs_proc_sync_conn(net, p, msg_end); - if (retc < 0) { - IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n", - retc); - return; + } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) && + (cp->state != state)) { + dest = cp->dest; + if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && + (state != IP_VS_SCTP_S_ESTABLISHED)) { + atomic_dec(&dest->activeconns); + atomic_inc(&dest->inactconns); + cp->flags &= ~IP_VS_CONN_F_INACTIVE; } - /* Make sure we have 32 bit alignment */ - msg_end = p + ((size + 3) & ~3); } - } else { - /* Old type of message */ - ip_vs_process_message_v0(net, buffer, buflen); - return; + + if (opt) + memcpy(&cp->in_seq, opt, sizeof(*opt)); + atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); + cp->state = state; + cp->old_state = cp->state; + /* + * We can not recover the right timeout for templates + * in all cases, we can not find the right fwmark + * virtual service. If needed, we can do it for + * non-fwmark persistent services. + */ + if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table) + cp->timeout = pp->timeout_table[state]; + else + cp->timeout = (3*60*HZ); + ip_vs_conn_put(cp); } } @@ -1181,10 +511,8 @@ static int set_mcast_if(struct sock *sk, char *ifname) { struct net_device *dev; struct inet_sock *inet = inet_sk(sk); - struct net *net = sock_net(sk); - dev = __dev_get_by_name(net, ifname); - if (!dev) + if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL) return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) @@ -1203,33 +531,30 @@ static int set_mcast_if(struct sock *sk, char *ifname) * Set the maximum length of sync message according to the * specified interface's MTU. 
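The set_sync_mesg_maxlen() hunk that continues just below sizes the master's send buffer from the interface MTU. A hedged restatement of that formula, with the 20-byte IPv4 header, 8-byte UDP header, 4-byte sync header and the extra 20 bytes of headroom copied from the hunk; SIMPLE_CONN_SIZE depends on the struct layout and is taken as a parameter rather than asserted:

	/* Restored sizing rule:
	 *   num    = (mtu - iphdr - udphdr - SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE
	 *   maxlen = SYNC_MESG_HEADER_LEN + SIMPLE_CONN_SIZE * min(num, 255)
	 */
	int send_mesg_maxlen(int mtu, int simple_conn_size)
	{
		int num = (mtu - 20 /* iphdr */ - 8 /* udphdr */
			       - 4 /* SYNC_MESG_HEADER_LEN */ - 20) / simple_conn_size;

		if (num > 255)			/* nr_conns is an 8-bit counter */
			num = 255;
		return 4 + simple_conn_size * num;
	}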
*/ -static int set_sync_mesg_maxlen(struct net *net, int sync_state) +static int set_sync_mesg_maxlen(int sync_state) { - struct netns_ipvs *ipvs = net_ipvs(net); struct net_device *dev; int num; if (sync_state == IP_VS_STATE_MASTER) { - dev = __dev_get_by_name(net, ipvs->master_mcast_ifn); - if (!dev) + if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL) return -ENODEV; num = (dev->mtu - sizeof(struct iphdr) - sizeof(struct udphdr) - SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE; - ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN + + sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN + SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF); IP_VS_DBG(7, "setting the maximum length of sync sending " - "message %d.\n", ipvs->send_mesg_maxlen); + "message %d.\n", sync_send_mesg_maxlen); } else if (sync_state == IP_VS_STATE_BACKUP) { - dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn); - if (!dev) + if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL) return -ENODEV; - ipvs->recv_mesg_maxlen = dev->mtu - + sync_recv_mesg_maxlen = dev->mtu - sizeof(struct iphdr) - sizeof(struct udphdr); IP_VS_DBG(7, "setting the maximum length of sync receiving " - "message %d.\n", ipvs->recv_mesg_maxlen); + "message %d.\n", sync_recv_mesg_maxlen); } return 0; @@ -1244,7 +569,6 @@ static int set_sync_mesg_maxlen(struct net *net, int sync_state) static int join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) { - struct net *net = sock_net(sk); struct ip_mreqn mreq; struct net_device *dev; int ret; @@ -1252,8 +576,7 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) memset(&mreq, 0, sizeof(mreq)); memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); - dev = __dev_get_by_name(net, ifname); - if (!dev) + if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL) return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -1270,13 +593,11 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) static int bind_mcastif_addr(struct socket *sock, char *ifname) { - struct net *net = sock_net(sock->sk); struct net_device *dev; __be32 addr; struct sockaddr_in sin; - dev = __dev_get_by_name(net, ifname); - if (!dev) + if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL) return -ENODEV; addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); @@ -1298,20 +619,19 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname) /* * Set up sending multicast socket over UDP */ -static struct socket *make_send_sock(struct net *net) +static struct socket * make_send_sock(void) { - struct netns_ipvs *ipvs = net_ipvs(net); struct socket *sock; int result; /* First create a socket */ - result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); + result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } - result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); + result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); goto error; @@ -1320,7 +640,7 @@ static struct socket *make_send_sock(struct net *net) set_mcast_loop(sock->sk, 0); set_mcast_ttl(sock->sk, 1); - result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); + result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn); if (result < 0) { pr_err("Error binding address of the mcast interface\n"); goto error; @@ -1344,14 +664,13 @@ static struct socket 
*make_send_sock(struct net *net) /* * Set up receiving multicast socket over UDP */ -static struct socket *make_receive_sock(struct net *net) +static struct socket * make_receive_sock(void) { - struct netns_ipvs *ipvs = net_ipvs(net); struct socket *sock; int result; /* First create a socket */ - result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); + result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); @@ -1370,7 +689,7 @@ static struct socket *make_receive_sock(struct net *net) /* join the multicast group */ result = join_mcast_group(sock->sk, (struct in_addr *) &mcast_addr.sin_addr, - ipvs->backup_mcast_ifn); + ip_vs_backup_mcast_ifn); if (result < 0) { pr_err("Error joining to the multicast group\n"); goto error; @@ -1441,21 +760,20 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) static int sync_thread_master(void *data) { struct ip_vs_sync_thread_data *tinfo = data; - struct netns_ipvs *ipvs = net_ipvs(tinfo->net); struct ip_vs_sync_buff *sb; pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " "syncid = %d\n", - ipvs->master_mcast_ifn, ipvs->master_syncid); + ip_vs_master_mcast_ifn, ip_vs_master_syncid); while (!kthread_should_stop()) { - while ((sb = sb_dequeue(ipvs))) { + while ((sb = sb_dequeue())) { ip_vs_send_sync_msg(tinfo->sock, sb->mesg); ip_vs_sync_buff_release(sb); } - /* check if entries stay in ipvs->sync_buff for 2 seconds */ - sb = get_curr_sync_buff(ipvs, 2 * HZ); + /* check if entries stay in curr_sb for 2 seconds */ + sb = get_curr_sync_buff(2 * HZ); if (sb) { ip_vs_send_sync_msg(tinfo->sock, sb->mesg); ip_vs_sync_buff_release(sb); @@ -1465,13 +783,14 @@ static int sync_thread_master(void *data) } /* clean up the sync_buff queue */ - while ((sb = sb_dequeue(ipvs))) + while ((sb=sb_dequeue())) { ip_vs_sync_buff_release(sb); + } /* clean up the current sync_buff */ - sb = get_curr_sync_buff(ipvs, 0); - if (sb) + if ((sb = get_curr_sync_buff(0))) { ip_vs_sync_buff_release(sb); + } /* release the sending multicast socket */ sock_release(tinfo->sock); @@ -1484,12 +803,11 @@ static int sync_thread_master(void *data) static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; - struct netns_ipvs *ipvs = net_ipvs(tinfo->net); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " "syncid = %d\n", - ipvs->backup_mcast_ifn, ipvs->backup_syncid); + ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); while (!kthread_should_stop()) { wait_event_interruptible(*sk_sleep(tinfo->sock->sk), @@ -1499,7 +817,7 @@ static int sync_thread_backup(void *data) /* do we have data now? 
*/ while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { len = ip_vs_receive(tinfo->sock, tinfo->buf, - ipvs->recv_mesg_maxlen); + sync_recv_mesg_maxlen); if (len <= 0) { pr_err("receiving message error\n"); break; @@ -1508,7 +826,7 @@ static int sync_thread_backup(void *data) /* disable bottom half, because it accesses the data shared by softirq while getting/creating conns */ local_bh_disable(); - ip_vs_process_message(tinfo->net, tinfo->buf, len); + ip_vs_process_message(tinfo->buf, len); local_bh_enable(); } } @@ -1522,42 +840,41 @@ static int sync_thread_backup(void *data) } -int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) +int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) { struct ip_vs_sync_thread_data *tinfo; struct task_struct **realtask, *task; struct socket *sock; - struct netns_ipvs *ipvs = net_ipvs(net); char *name, *buf = NULL; int (*threadfn)(void *data); int result = -ENOMEM; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", - sizeof(struct ip_vs_sync_conn_v0)); + sizeof(struct ip_vs_sync_conn)); if (state == IP_VS_STATE_MASTER) { - if (ipvs->master_thread) + if (sync_master_thread) return -EEXIST; - strlcpy(ipvs->master_mcast_ifn, mcast_ifn, - sizeof(ipvs->master_mcast_ifn)); - ipvs->master_syncid = syncid; - realtask = &ipvs->master_thread; - name = "ipvs_master:%d"; + strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, + sizeof(ip_vs_master_mcast_ifn)); + ip_vs_master_syncid = syncid; + realtask = &sync_master_thread; + name = "ipvs_syncmaster"; threadfn = sync_thread_master; - sock = make_send_sock(net); + sock = make_send_sock(); } else if (state == IP_VS_STATE_BACKUP) { - if (ipvs->backup_thread) + if (sync_backup_thread) return -EEXIST; - strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, - sizeof(ipvs->backup_mcast_ifn)); - ipvs->backup_syncid = syncid; - realtask = &ipvs->backup_thread; - name = "ipvs_backup:%d"; + strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, + sizeof(ip_vs_backup_mcast_ifn)); + ip_vs_backup_syncid = syncid; + realtask = &sync_backup_thread; + name = "ipvs_syncbackup"; threadfn = sync_thread_backup; - sock = make_receive_sock(net); + sock = make_receive_sock(); } else { return -EINVAL; } @@ -1567,9 +884,9 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) goto out; } - set_sync_mesg_maxlen(net, state); + set_sync_mesg_maxlen(state); if (state == IP_VS_STATE_BACKUP) { - buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL); + buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL); if (!buf) goto outsocket; } @@ -1578,11 +895,10 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) if (!tinfo) goto outbuf; - tinfo->net = net; tinfo->sock = sock; tinfo->buf = buf; - task = kthread_run(threadfn, tinfo, name, ipvs->gen); + task = kthread_run(threadfn, tinfo, name); if (IS_ERR(task)) { result = PTR_ERR(task); goto outtinfo; @@ -1590,7 +906,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) /* mark as active */ *realtask = task; - ipvs->sync_state |= state; + ip_vs_sync_state |= state; /* increase the module use count */ ip_vs_use_count_inc(); @@ -1608,18 +924,16 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) } -int stop_sync_thread(struct net *net, int state) +int stop_sync_thread(int state) { - struct netns_ipvs *ipvs = net_ipvs(net); - IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); if (state == 
IP_VS_STATE_MASTER) { - if (!ipvs->master_thread) + if (!sync_master_thread) return -ESRCH; pr_info("stopping master sync thread %d ...\n", - task_pid_nr(ipvs->master_thread)); + task_pid_nr(sync_master_thread)); /* * The lock synchronizes with sb_queue_tail(), so that we don't @@ -1627,21 +941,21 @@ int stop_sync_thread(struct net *net, int state) * progress of stopping the master sync daemon. */ - spin_lock_bh(&ipvs->sync_lock); - ipvs->sync_state &= ~IP_VS_STATE_MASTER; - spin_unlock_bh(&ipvs->sync_lock); - kthread_stop(ipvs->master_thread); - ipvs->master_thread = NULL; + spin_lock_bh(&ip_vs_sync_lock); + ip_vs_sync_state &= ~IP_VS_STATE_MASTER; + spin_unlock_bh(&ip_vs_sync_lock); + kthread_stop(sync_master_thread); + sync_master_thread = NULL; } else if (state == IP_VS_STATE_BACKUP) { - if (!ipvs->backup_thread) + if (!sync_backup_thread) return -ESRCH; pr_info("stopping backup sync thread %d ...\n", - task_pid_nr(ipvs->backup_thread)); + task_pid_nr(sync_backup_thread)); - ipvs->sync_state &= ~IP_VS_STATE_BACKUP; - kthread_stop(ipvs->backup_thread); - ipvs->backup_thread = NULL; + ip_vs_sync_state &= ~IP_VS_STATE_BACKUP; + kthread_stop(sync_backup_thread); + sync_backup_thread = NULL; } else { return -EINVAL; } @@ -1651,42 +965,3 @@ int stop_sync_thread(struct net *net, int state) return 0; } - -/* - * Initialize data struct for each netns - */ -static int __net_init __ip_vs_sync_init(struct net *net) -{ - struct netns_ipvs *ipvs = net_ipvs(net); - - INIT_LIST_HEAD(&ipvs->sync_queue); - spin_lock_init(&ipvs->sync_lock); - spin_lock_init(&ipvs->sync_buff_lock); - - ipvs->sync_mcast_addr.sin_family = AF_INET; - ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT); - ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP); - return 0; -} - -static void __ip_vs_sync_cleanup(struct net *net) -{ - stop_sync_thread(net, IP_VS_STATE_MASTER); - stop_sync_thread(net, IP_VS_STATE_BACKUP); -} - -static struct pernet_operations ipvs_sync_ops = { - .init = __ip_vs_sync_init, - .exit = __ip_vs_sync_cleanup, -}; - - -int __init ip_vs_sync_init(void) -{ - return register_pernet_subsys(&ipvs_sync_ops); -} - -void __exit ip_vs_sync_cleanup(void) -{ - unregister_pernet_subsys(&ipvs_sync_ops); -} diff --git a/trunk/net/netfilter/ipvs/ip_vs_xmit.c b/trunk/net/netfilter/ipvs/ip_vs_xmit.c index 1f2a4e35fb11..5325a3fbe4ac 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_xmit.c +++ b/trunk/net/netfilter/ipvs/ip_vs_xmit.c @@ -175,6 +175,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb) .fl4_tos = RT_TOS(iph->tos), .mark = skb->mark, }; + struct rtable *rt; if (ip_route_output_key(net, &rt, &fl)) return 0; @@ -389,8 +390,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* MTU checking */ mtu = dst_mtu(&rt->dst); - if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) && - !skb_is_gso(skb)) { + if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { ip_rt_put(rt); icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); IP_VS_DBG_RL("%s(): frag needed\n", __func__); @@ -443,7 +443,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, /* MTU checking */ mtu = dst_mtu(&rt->dst); - if (skb->len > mtu && !skb_is_gso(skb)) { + if (skb->len > mtu) { if (!skb->dev) { struct net *net = dev_net(skb_dst(skb)->dev); @@ -543,8 +543,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* MTU checking */ mtu = dst_mtu(&rt->dst); - if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) && - !skb_is_gso(skb)) { + if ((skb->len > mtu) && (iph->frag_off & 
htons(IP_DF))) { icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for"); @@ -659,7 +658,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, /* MTU checking */ mtu = dst_mtu(&rt->dst); - if (skb->len > mtu && !skb_is_gso(skb)) { + if (skb->len > mtu) { if (!skb->dev) { struct net *net = dev_net(skb_dst(skb)->dev); @@ -774,8 +773,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, df |= (old_iph->frag_off & htons(IP_DF)); - if ((old_iph->frag_off & htons(IP_DF) && - mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) { + if ((old_iph->frag_off & htons(IP_DF)) + && mtu < ntohs(old_iph->tot_len)) { icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); IP_VS_DBG_RL("%s(): frag needed\n", __func__); goto tx_error_put; @@ -887,8 +886,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); - if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) && - !skb_is_gso(skb)) { + if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { if (!skb->dev) { struct net *net = dev_net(skb_dst(skb)->dev); @@ -993,8 +991,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* MTU checking */ mtu = dst_mtu(&rt->dst); - if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu && - !skb_is_gso(skb)) { + if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) { icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); ip_rt_put(rt); IP_VS_DBG_RL("%s(): frag needed\n", __func__); @@ -1161,8 +1158,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* MTU checking */ mtu = dst_mtu(&rt->dst); - if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) && - !skb_is_gso(skb)) { + if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); IP_VS_DBG_RL("%s(): frag needed\n", __func__); goto tx_error_put; @@ -1276,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, /* MTU checking */ mtu = dst_mtu(&rt->dst); - if (skb->len > mtu && !skb_is_gso(skb)) { + if (skb->len > mtu) { if (!skb->dev) { struct net *net = dev_net(skb_dst(skb)->dev); diff --git a/trunk/net/netfilter/nf_conntrack_broadcast.c b/trunk/net/netfilter/nf_conntrack_broadcast.c deleted file mode 100644 index 4e99cca61612..000000000000 --- a/trunk/net/netfilter/nf_conntrack_broadcast.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * broadcast connection tracking helper - * - * (c) 2005 Patrick McHardy - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
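The ip_vs_xmit.c hunks above drop the !skb_is_gso() exception, so after this change any IPv4 packet that exceeds the route MTU with DF set is answered with ICMP fragmentation-needed. A compact, standalone restatement of the restored predicate (illustration only, not kernel code):

	/* Nonzero when the sender must be told to fragment: the packet exceeds
	 * the path MTU and carries the Don't Fragment bit, with no GSO escape. */
	int needs_frag_needed_icmp(unsigned int skb_len, unsigned int mtu, int df_set)
	{
		return skb_len > mtu && df_set;
	}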
- */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -int nf_conntrack_broadcast_help(struct sk_buff *skb, - unsigned int protoff, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int timeout) -{ - struct nf_conntrack_expect *exp; - struct iphdr *iph = ip_hdr(skb); - struct rtable *rt = skb_rtable(skb); - struct in_device *in_dev; - struct nf_conn_help *help = nfct_help(ct); - __be32 mask = 0; - - /* we're only interested in locally generated packets */ - if (skb->sk == NULL) - goto out; - if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) - goto out; - if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) - goto out; - - rcu_read_lock(); - in_dev = __in_dev_get_rcu(rt->dst.dev); - if (in_dev != NULL) { - for_primary_ifa(in_dev) { - if (ifa->ifa_broadcast == iph->daddr) { - mask = ifa->ifa_mask; - break; - } - } endfor_ifa(in_dev); - } - rcu_read_unlock(); - - if (mask == 0) - goto out; - - exp = nf_ct_expect_alloc(ct); - if (exp == NULL) - goto out; - - exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; - exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port; - - exp->mask.src.u3.ip = mask; - exp->mask.src.u.udp.port = htons(0xFFFF); - - exp->expectfn = NULL; - exp->flags = NF_CT_EXPECT_PERMANENT; - exp->class = NF_CT_EXPECT_CLASS_DEFAULT; - exp->helper = NULL; - - nf_ct_expect_related(exp); - nf_ct_expect_put(exp); - - nf_ct_refresh(ct, skb, timeout * HZ); -out: - return NF_ACCEPT; -} -EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help); - -MODULE_LICENSE("GPL"); diff --git a/trunk/net/netfilter/nf_conntrack_core.c b/trunk/net/netfilter/nf_conntrack_core.c index 1909311c392a..e61511929c66 100644 --- a/trunk/net/netfilter/nf_conntrack_core.c +++ b/trunk/net/netfilter/nf_conntrack_core.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include @@ -283,11 +282,6 @@ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); static void death_by_timeout(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; - struct nf_conn_tstamp *tstamp; - - tstamp = nf_conn_tstamp_find(ct); - if (tstamp && tstamp->stop == 0) - tstamp->stop = ktime_to_ns(ktime_get_real()); if (!test_bit(IPS_DYING_BIT, &ct->status) && unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) { @@ -425,7 +419,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct nf_conn_help *help; - struct nf_conn_tstamp *tstamp; struct hlist_nulls_node *n; enum ip_conntrack_info ctinfo; struct net *net; @@ -493,16 +486,8 @@ __nf_conntrack_confirm(struct sk_buff *skb) ct->timeout.expires += jiffies; add_timer(&ct->timeout); atomic_inc(&ct->ct_general.use); - ct->status |= IPS_CONFIRMED; - - /* set conntrack timestamp, if enabled. */ - tstamp = nf_conn_tstamp_find(ct); - if (tstamp) { - if (skb->tstamp.tv64 == 0) - __net_timestamp((struct sk_buff *)skb); + set_bit(IPS_CONFIRMED_BIT, &ct->status); - tstamp->start = ktime_to_ns(skb->tstamp); - } /* Since the lookup is lockless, hash insertion must be done after * starting the timer and setting the CONFIRMED bit. The RCU barriers * guarantee that no other CPU can find the conntrack before the above @@ -670,8 +655,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone, * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged. 
*/ memset(&ct->tuplehash[IP_CT_DIR_MAX], 0, - offsetof(struct nf_conn, proto) - - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); + sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); spin_lock_init(&ct->lock); ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; @@ -761,7 +745,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, } nf_ct_acct_ext_add(ct, GFP_ATOMIC); - nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, @@ -1202,11 +1185,6 @@ struct __nf_ct_flush_report { static int kill_report(struct nf_conn *i, void *data) { struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data; - struct nf_conn_tstamp *tstamp; - - tstamp = nf_conn_tstamp_find(i); - if (tstamp && tstamp->stop == 0) - tstamp->stop = ktime_to_ns(ktime_get_real()); /* If we fail to deliver the event, death_by_timeout() will retry */ if (nf_conntrack_event_report(IPCT_DESTROY, i, @@ -1223,9 +1201,9 @@ static int kill_all(struct nf_conn *i, void *data) return 1; } -void nf_ct_free_hashtable(void *hash, unsigned int size) +void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size) { - if (is_vmalloc_addr(hash)) + if (vmalloced) vfree(hash); else free_pages((unsigned long)hash, @@ -1292,7 +1270,8 @@ static void nf_conntrack_cleanup_net(struct net *net) goto i_see_dead_people; } - nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); + nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, + net->ct.htable_size); nf_conntrack_ecache_fini(net); nf_conntrack_acct_fini(net); nf_conntrack_expect_fini(net); @@ -1321,18 +1300,21 @@ void nf_conntrack_cleanup(struct net *net) } } -void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) +void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) { struct hlist_nulls_head *hash; unsigned int nr_slots, i; size_t sz; + *vmalloced = 0; + BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); if (!hash) { + *vmalloced = 1; printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); @@ -1348,7 +1330,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) { - int i, bucket; + int i, bucket, vmalloced, old_vmalloced; unsigned int hashsize, old_size; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; @@ -1365,7 +1347,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) if (!hashsize) return -EINVAL; - hash = nf_ct_alloc_hashtable(&hashsize, 1); + hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1); if (!hash) return -ENOMEM; @@ -1387,13 +1369,15 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) } } old_size = init_net.ct.htable_size; + old_vmalloced = init_net.ct.hash_vmalloc; old_hash = init_net.ct.hash; init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; + init_net.ct.hash_vmalloc = vmalloced; init_net.ct.hash = hash; spin_unlock_bh(&nf_conntrack_lock); - nf_ct_free_hashtable(old_hash, old_size); + nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); return 0; } EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); @@ 
-1506,7 +1490,8 @@ static int nf_conntrack_init_net(struct net *net) } net->ct.htable_size = nf_conntrack_htable_size; - net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); + net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, + &net->ct.hash_vmalloc, 1); if (!net->ct.hash) { ret = -ENOMEM; printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); @@ -1518,9 +1503,6 @@ static int nf_conntrack_init_net(struct net *net) ret = nf_conntrack_acct_init(net); if (ret < 0) goto err_acct; - ret = nf_conntrack_tstamp_init(net); - if (ret < 0) - goto err_tstamp; ret = nf_conntrack_ecache_init(net); if (ret < 0) goto err_ecache; @@ -1528,13 +1510,12 @@ static int nf_conntrack_init_net(struct net *net) return 0; err_ecache: - nf_conntrack_tstamp_fini(net); -err_tstamp: nf_conntrack_acct_fini(net); err_acct: nf_conntrack_expect_fini(net); err_expect: - nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); + nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, + net->ct.htable_size); err_hash: kmem_cache_destroy(net->ct.nf_conntrack_cachep); err_cache: diff --git a/trunk/net/netfilter/nf_conntrack_expect.c b/trunk/net/netfilter/nf_conntrack_expect.c index cd1e8e0970f2..a20fb0bd1efe 100644 --- a/trunk/net/netfilter/nf_conntrack_expect.c +++ b/trunk/net/netfilter/nf_conntrack_expect.c @@ -319,8 +319,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) const struct nf_conntrack_expect_policy *p; unsigned int h = nf_ct_expect_dst_hash(&exp->tuple); - /* two references : one for hash insert, one for the timer */ - atomic_add(2, &exp->use); + atomic_inc(&exp->use); if (master_help) { hlist_add_head(&exp->lnode, &master_help->expectations); @@ -334,14 +333,12 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) setup_timer(&exp->timeout, nf_ct_expectation_timed_out, (unsigned long)exp); if (master_help) { - p = &rcu_dereference_protected( - master_help->helper, - lockdep_is_held(&nf_conntrack_lock) - )->expect_policy[exp->class]; + p = &master_help->helper->expect_policy[exp->class]; exp->timeout.expires = jiffies + p->timeout * HZ; } add_timer(&exp->timeout); + atomic_inc(&exp->use); NF_CT_STAT_INC(net, expect_create); } @@ -372,10 +369,7 @@ static inline int refresh_timer(struct nf_conntrack_expect *i) if (!del_timer(&i->timeout)) return 0; - p = &rcu_dereference_protected( - master_help->helper, - lockdep_is_held(&nf_conntrack_lock) - )->expect_policy[i->class]; + p = &master_help->helper->expect_policy[i->class]; i->timeout.expires = jiffies + p->timeout * HZ; add_timer(&i->timeout); return 1; @@ -413,10 +407,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) } /* Will be over limit? 
*/ if (master_help) { - p = &rcu_dereference_protected( - master_help->helper, - lockdep_is_held(&nf_conntrack_lock) - )->expect_policy[expect->class]; + p = &master_help->helper->expect_policy[expect->class]; if (p->max_expected && master_help->expecting[expect->class] >= p->max_expected) { evict_oldest_expect(master, expect); @@ -487,7 +478,7 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq) struct hlist_node *n; for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { - n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + n = rcu_dereference(net->ct.expect_hash[st->bucket].first); if (n) return n; } @@ -500,11 +491,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq, struct net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; - head = rcu_dereference(hlist_next_rcu(head)); + head = rcu_dereference(head->next); while (head == NULL) { if (++st->bucket >= nf_ct_expect_hsize) return NULL; - head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); + head = rcu_dereference(net->ct.expect_hash[st->bucket].first); } return head; } @@ -639,7 +630,8 @@ int nf_conntrack_expect_init(struct net *net) } net->ct.expect_count = 0; - net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0); + net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, + &net->ct.expect_vmalloc, 0); if (net->ct.expect_hash == NULL) goto err1; @@ -661,7 +653,8 @@ int nf_conntrack_expect_init(struct net *net) if (net_eq(net, &init_net)) kmem_cache_destroy(nf_ct_expect_cachep); err2: - nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); + nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc, + nf_ct_expect_hsize); err1: return err; } @@ -673,5 +666,6 @@ void nf_conntrack_expect_fini(struct net *net) rcu_barrier(); /* Wait for call_rcu() before destroy */ kmem_cache_destroy(nf_ct_expect_cachep); } - nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); + nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc, + nf_ct_expect_hsize); } diff --git a/trunk/net/netfilter/nf_conntrack_extend.c b/trunk/net/netfilter/nf_conntrack_extend.c index 80a23ed62bb0..bd82450c193f 100644 --- a/trunk/net/netfilter/nf_conntrack_extend.c +++ b/trunk/net/netfilter/nf_conntrack_extend.c @@ -140,16 +140,15 @@ static void update_alloc_size(struct nf_ct_ext_type *type) /* This assumes that extended areas in conntrack for the types whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */ for (i = min; i <= max; i++) { - t1 = rcu_dereference_protected(nf_ct_ext_types[i], - lockdep_is_held(&nf_ct_ext_type_mutex)); + t1 = nf_ct_ext_types[i]; if (!t1) continue; - t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) + - t1->len; + t1->alloc_size = sizeof(struct nf_ct_ext) + + ALIGN(sizeof(struct nf_ct_ext), t1->align) + + t1->len; for (j = 0; j < NF_CT_EXT_NUM; j++) { - t2 = rcu_dereference_protected(nf_ct_ext_types[j], - lockdep_is_held(&nf_ct_ext_type_mutex)); + t2 = nf_ct_ext_types[j]; if (t2 == NULL || t2 == t1 || (t2->flags & NF_CT_EXT_F_PREALLOC) == 0) continue; diff --git a/trunk/net/netfilter/nf_conntrack_helper.c b/trunk/net/netfilter/nf_conntrack_helper.c index 1bdfea357955..59e1a4cd4e8b 100644 --- a/trunk/net/netfilter/nf_conntrack_helper.c +++ b/trunk/net/netfilter/nf_conntrack_helper.c @@ -33,6 +33,7 @@ static DEFINE_MUTEX(nf_ct_helper_mutex); static struct hlist_head *nf_ct_helper_hash __read_mostly; static unsigned int nf_ct_helper_hsize __read_mostly; 
static unsigned int nf_ct_helper_count __read_mostly; +static int nf_ct_helper_vmalloc; /* Stupid hash, but collision free for the default registrations of the @@ -157,10 +158,7 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i, struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i); struct nf_conn_help *help = nfct_help(ct); - if (help && rcu_dereference_protected( - help->helper, - lockdep_is_held(&nf_conntrack_lock) - ) == me) { + if (help && help->helper == me) { nf_conntrack_event(IPCT_HELPER, ct); rcu_assign_pointer(help->helper, NULL); } @@ -212,10 +210,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, hlist_for_each_entry_safe(exp, n, next, &net->ct.expect_hash[i], hnode) { struct nf_conn_help *help = nfct_help(exp->master); - if ((rcu_dereference_protected( - help->helper, - lockdep_is_held(&nf_conntrack_lock) - ) == me || exp->helper == me) && + if ((help->helper == me || exp->helper == me) && del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); @@ -266,7 +261,8 @@ int nf_conntrack_helper_init(void) int err; nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ - nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); + nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, + &nf_ct_helper_vmalloc, 0); if (!nf_ct_helper_hash) return -ENOMEM; @@ -277,12 +273,14 @@ int nf_conntrack_helper_init(void) return 0; err1: - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc, + nf_ct_helper_hsize); return err; } void nf_conntrack_helper_fini(void) { nf_ct_extend_unregister(&helper_extend); - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc, + nf_ct_helper_hsize); } diff --git a/trunk/net/netfilter/nf_conntrack_netbios_ns.c b/trunk/net/netfilter/nf_conntrack_netbios_ns.c index 4c8f30a3d6d2..aadde018a072 100644 --- a/trunk/net/netfilter/nf_conntrack_netbios_ns.c +++ b/trunk/net/netfilter/nf_conntrack_netbios_ns.c @@ -18,7 +18,14 @@ #include #include #include +#include +#include +#include +#include #include +#include +#include +#include #include #include @@ -33,26 +40,75 @@ MODULE_ALIAS("ip_conntrack_netbios_ns"); MODULE_ALIAS_NFCT_HELPER("netbios_ns"); static unsigned int timeout __read_mostly = 3; -module_param(timeout, uint, S_IRUSR); +module_param(timeout, uint, 0400); MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); +static int help(struct sk_buff *skb, unsigned int protoff, + struct nf_conn *ct, enum ip_conntrack_info ctinfo) +{ + struct nf_conntrack_expect *exp; + struct iphdr *iph = ip_hdr(skb); + struct rtable *rt = skb_rtable(skb); + struct in_device *in_dev; + __be32 mask = 0; + + /* we're only interested in locally generated packets */ + if (skb->sk == NULL) + goto out; + if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) + goto out; + if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) + goto out; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(rt->dst.dev); + if (in_dev != NULL) { + for_primary_ifa(in_dev) { + if (ifa->ifa_broadcast == iph->daddr) { + mask = ifa->ifa_mask; + break; + } + } endfor_ifa(in_dev); + } + rcu_read_unlock(); + + if (mask == 0) + goto out; + + exp = nf_ct_expect_alloc(ct); + if (exp == NULL) + goto out; + + exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; + exp->tuple.src.u.udp.port = htons(NMBD_PORT); + + exp->mask.src.u3.ip = mask; + exp->mask.src.u.udp.port = htons(0xFFFF); + + 
exp->expectfn = NULL; + exp->flags = NF_CT_EXPECT_PERMANENT; + exp->class = NF_CT_EXPECT_CLASS_DEFAULT; + exp->helper = NULL; + + nf_ct_expect_related(exp); + nf_ct_expect_put(exp); + + nf_ct_refresh(ct, skb, timeout * HZ); +out: + return NF_ACCEPT; +} + static struct nf_conntrack_expect_policy exp_policy = { .max_expected = 1, }; -static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff, - struct nf_conn *ct, enum ip_conntrack_info ctinfo) -{ - return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout); -} - static struct nf_conntrack_helper helper __read_mostly = { .name = "netbios-ns", - .tuple.src.l3num = NFPROTO_IPV4, + .tuple.src.l3num = AF_INET, .tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT), .tuple.dst.protonum = IPPROTO_UDP, .me = THIS_MODULE, - .help = netbios_ns_help, + .help = help, .expect_policy = &exp_policy, }; diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index 3fec12c570a8..93297aaceb2b 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -42,7 +42,6 @@ #include #include #include -#include #ifdef CONFIG_NF_NAT_NEEDED #include #include @@ -231,33 +230,6 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, return -1; } -static int -ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) -{ - struct nlattr *nest_count; - const struct nf_conn_tstamp *tstamp; - - tstamp = nf_conn_tstamp_find(ct); - if (!tstamp) - return 0; - - nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED); - if (!nest_count) - goto nla_put_failure; - - NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)); - if (tstamp->stop != 0) { - NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP, - cpu_to_be64(tstamp->stop)); - } - nla_nest_end(skb, nest_count); - - return 0; - -nla_put_failure: - return -1; -} - #ifdef CONFIG_NF_CONNTRACK_MARK static inline int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) @@ -432,7 +404,6 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, ctnetlink_dump_timeout(skb, ct) < 0 || ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 || - ctnetlink_dump_timestamp(skb, ct) < 0 || ctnetlink_dump_protoinfo(skb, ct) < 0 || ctnetlink_dump_helpinfo(skb, ct) < 0 || ctnetlink_dump_mark(skb, ct) < 0 || @@ -499,18 +470,6 @@ ctnetlink_secctx_size(const struct nf_conn *ct) #endif } -static inline size_t -ctnetlink_timestamp_size(const struct nf_conn *ct) -{ -#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP - if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP)) - return 0; - return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t)); -#else - return 0; -#endif -} - static inline size_t ctnetlink_nlmsg_size(const struct nf_conn *ct) { @@ -522,7 +481,6 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct) + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ + ctnetlink_counters_size(ct) - + ctnetlink_timestamp_size(ct) + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ + nla_total_size(0) /* CTA_PROTOINFO */ + nla_total_size(0) /* CTA_HELP */ @@ -613,8 +571,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) if (events & (1 << IPCT_DESTROY)) { if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 || - ctnetlink_dump_timestamp(skb, ct) < 0) + ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) goto 
nla_put_failure; } else { if (ctnetlink_dump_timeout(skb, ct) < 0) @@ -1400,7 +1357,6 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, } nf_ct_acct_ext_add(ct, GFP_ATOMIC); - nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); /* we must add conntrack extensions before confirmation. */ ct->status |= IPS_CONFIRMED; @@ -1419,7 +1375,6 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, } #endif - memset(&ct->proto, 0, sizeof(ct->proto)); if (cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); if (err < 0) diff --git a/trunk/net/netfilter/nf_conntrack_proto.c b/trunk/net/netfilter/nf_conntrack_proto.c index 5701c8dd783c..dc7bb74110df 100644 --- a/trunk/net/netfilter/nf_conntrack_proto.c +++ b/trunk/net/netfilter/nf_conntrack_proto.c @@ -166,7 +166,6 @@ static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) { int ret = 0; - struct nf_conntrack_l3proto *old; if (proto->l3proto >= AF_MAX) return -EBUSY; @@ -175,9 +174,7 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) return -EINVAL; mutex_lock(&nf_ct_proto_mutex); - old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto], - lockdep_is_held(&nf_ct_proto_mutex)); - if (old != &nf_conntrack_l3proto_generic) { + if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) { ret = -EBUSY; goto out_unlock; } @@ -204,9 +201,7 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) BUG_ON(proto->l3proto >= AF_MAX); mutex_lock(&nf_ct_proto_mutex); - BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto], - lockdep_is_held(&nf_ct_proto_mutex) - ) != proto); + BUG_ON(nf_ct_l3protos[proto->l3proto] != proto); rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], &nf_conntrack_l3proto_generic); nf_ct_l3proto_unregister_sysctl(proto); @@ -284,7 +279,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) mutex_lock(&nf_ct_proto_mutex); if (!nf_ct_protos[l4proto->l3proto]) { /* l3proto may be loaded latter. */ - struct nf_conntrack_l4proto __rcu **proto_array; + struct nf_conntrack_l4proto **proto_array; int i; proto_array = kmalloc(MAX_NF_CT_PROTO * @@ -296,7 +291,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) } for (i = 0; i < MAX_NF_CT_PROTO; i++) - RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic); + proto_array[i] = &nf_conntrack_l4proto_generic; /* Before making proto_array visible to lockless readers, * we must make sure its content is committed to memory. 
@@ -304,10 +299,8 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) smp_wmb(); nf_ct_protos[l4proto->l3proto] = proto_array; - } else if (rcu_dereference_protected( - nf_ct_protos[l4proto->l3proto][l4proto->l4proto], - lockdep_is_held(&nf_ct_proto_mutex) - ) != &nf_conntrack_l4proto_generic) { + } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != + &nf_conntrack_l4proto_generic) { ret = -EBUSY; goto out_unlock; } @@ -338,10 +331,7 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) BUG_ON(l4proto->l3proto >= PF_MAX); mutex_lock(&nf_ct_proto_mutex); - BUG_ON(rcu_dereference_protected( - nf_ct_protos[l4proto->l3proto][l4proto->l4proto], - lockdep_is_held(&nf_ct_proto_mutex) - ) != l4proto); + BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto); rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], &nf_conntrack_l4proto_generic); nf_ct_l4proto_unregister_sysctl(l4proto); diff --git a/trunk/net/netfilter/nf_conntrack_proto_dccp.c b/trunk/net/netfilter/nf_conntrack_proto_dccp.c index 9ae57c57c50e..5292560d6d4a 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_dccp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_dccp.c @@ -452,9 +452,6 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; ct->proto.dccp.state = CT_DCCP_NONE; - ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST; - ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL; - ct->proto.dccp.handshake_seq = 0; return true; out_invalid: diff --git a/trunk/net/netfilter/nf_conntrack_proto_sctp.c b/trunk/net/netfilter/nf_conntrack_proto_sctp.c index 6f4ee70f460b..c6049c2d5ea8 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_sctp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_sctp.c @@ -413,7 +413,6 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, test_bit(SCTP_CID_COOKIE_ACK, map)) return false; - memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp)); new_state = SCTP_CONNTRACK_MAX; for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { /* Don't need lock here: this conntrack not in circulation yet */ diff --git a/trunk/net/netfilter/nf_conntrack_proto_tcp.c b/trunk/net/netfilter/nf_conntrack_proto_tcp.c index 6f38d0e2ea4a..3fb2b73b24dc 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_tcp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_tcp.c @@ -1066,7 +1066,9 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, BUG_ON(th == NULL); /* Don't need lock here: this conntrack not in circulation yet */ - new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE]; + new_state + = tcp_conntracks[0][get_conntrack_index(th)] + [TCP_CONNTRACK_NONE]; /* Invalid: delete conntrack */ if (new_state >= TCP_CONNTRACK_MAX) { @@ -1075,7 +1077,6 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, } if (new_state == TCP_CONNTRACK_SYN_SENT) { - memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); /* SYN packet */ ct->proto.tcp.seen[0].td_end = segment_seq_plus_len(ntohl(th->seq), skb->len, @@ -1087,11 +1088,11 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, ct->proto.tcp.seen[0].td_end; tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]); + ct->proto.tcp.seen[1].flags = 0; } else if (nf_ct_tcp_loose == 0) { /* Don't try to pick up connections. 
*/ return false; } else { - memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); /* * We are in the middle of a connection, * its history is lost for us. @@ -1106,6 +1107,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, ct->proto.tcp.seen[0].td_maxend = ct->proto.tcp.seen[0].td_end + ct->proto.tcp.seen[0].td_maxwin; + ct->proto.tcp.seen[0].td_scale = 0; /* We assume SACK and liberal window checking to handle * window scaling */ @@ -1114,7 +1116,13 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, IP_CT_TCP_FLAG_BE_LIBERAL; } + ct->proto.tcp.seen[1].td_end = 0; + ct->proto.tcp.seen[1].td_maxend = 0; + ct->proto.tcp.seen[1].td_maxwin = 0; + ct->proto.tcp.seen[1].td_scale = 0; + /* tcp_packet will set them */ + ct->proto.tcp.state = TCP_CONNTRACK_NONE; ct->proto.tcp.last_index = TCP_NONE_SET; pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " diff --git a/trunk/net/netfilter/nf_conntrack_snmp.c b/trunk/net/netfilter/nf_conntrack_snmp.c deleted file mode 100644 index 6e545e26289e..000000000000 --- a/trunk/net/netfilter/nf_conntrack_snmp.c +++ /dev/null @@ -1,77 +0,0 @@ -/* - * SNMP service broadcast connection tracking helper - * - * (c) 2011 Jiri Olsa - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include -#include -#include -#include - -#include -#include -#include - -#define SNMP_PORT 161 - -MODULE_AUTHOR("Jiri Olsa "); -MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NFCT_HELPER("snmp"); - -static unsigned int timeout __read_mostly = 30; -module_param(timeout, uint, S_IRUSR); -MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); - -int (*nf_nat_snmp_hook)(struct sk_buff *skb, - unsigned int protoff, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo); -EXPORT_SYMBOL_GPL(nf_nat_snmp_hook); - -static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff, - struct nf_conn *ct, enum ip_conntrack_info ctinfo) -{ - typeof(nf_nat_snmp_hook) nf_nat_snmp; - - nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout); - - nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook); - if (nf_nat_snmp && ct->status & IPS_NAT_MASK) - return nf_nat_snmp(skb, protoff, ct, ctinfo); - - return NF_ACCEPT; -} - -static struct nf_conntrack_expect_policy exp_policy = { - .max_expected = 1, -}; - -static struct nf_conntrack_helper helper __read_mostly = { - .name = "snmp", - .tuple.src.l3num = NFPROTO_IPV4, - .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT), - .tuple.dst.protonum = IPPROTO_UDP, - .me = THIS_MODULE, - .help = snmp_conntrack_help, - .expect_policy = &exp_policy, -}; - -static int __init nf_conntrack_snmp_init(void) -{ - exp_policy.timeout = timeout; - return nf_conntrack_helper_register(&helper); -} - -static void __exit nf_conntrack_snmp_fini(void) -{ - nf_conntrack_helper_unregister(&helper); -} - -module_init(nf_conntrack_snmp_init); -module_exit(nf_conntrack_snmp_fini); diff --git a/trunk/net/netfilter/nf_conntrack_standalone.c b/trunk/net/netfilter/nf_conntrack_standalone.c index 0ae142825881..b4d7f0f24b27 100644 --- a/trunk/net/netfilter/nf_conntrack_standalone.c +++ b/trunk/net/netfilter/nf_conntrack_standalone.c @@ -29,8 +29,6 @@ #include #include #include -#include -#include MODULE_LICENSE("GPL"); @@ -47,7 +45,6 @@ 
EXPORT_SYMBOL_GPL(print_tuple); struct ct_iter_state { struct seq_net_private p; unsigned int bucket; - u_int64_t time_now; }; static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) @@ -59,7 +56,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) for (st->bucket = 0; st->bucket < net->ct.htable_size; st->bucket++) { - n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); + n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) return n; } @@ -72,15 +69,13 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; - head = rcu_dereference(hlist_nulls_next_rcu(head)); + head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { if (++st->bucket >= net->ct.htable_size) return NULL; } - head = rcu_dereference( - hlist_nulls_first_rcu( - &net->ct.hash[st->bucket])); + head = rcu_dereference(net->ct.hash[st->bucket].first); } return head; } @@ -98,9 +93,6 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos) static void *ct_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { - struct ct_iter_state *st = seq->private; - - st->time_now = ktime_to_ns(ktime_get_real()); rcu_read_lock(); return ct_get_idx(seq, *pos); } @@ -140,34 +132,6 @@ static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) } #endif -#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP -static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) -{ - struct ct_iter_state *st = s->private; - struct nf_conn_tstamp *tstamp; - s64 delta_time; - - tstamp = nf_conn_tstamp_find(ct); - if (tstamp) { - delta_time = st->time_now - tstamp->start; - if (delta_time > 0) - delta_time = div_s64(delta_time, NSEC_PER_SEC); - else - delta_time = 0; - - return seq_printf(s, "delta-time=%llu ", - (unsigned long long)delta_time); - } - return 0; -} -#else -static inline int -ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) -{ - return 0; -} -#endif - /* return 0 on success, 1 in case of error */ static int ct_seq_show(struct seq_file *s, void *v) { @@ -236,9 +200,6 @@ static int ct_seq_show(struct seq_file *s, void *v) goto release; #endif - if (ct_show_delta_time(s, ct)) - goto release; - if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) goto release; diff --git a/trunk/net/netfilter/nf_conntrack_timestamp.c b/trunk/net/netfilter/nf_conntrack_timestamp.c deleted file mode 100644 index af7dd31af0a1..000000000000 --- a/trunk/net/netfilter/nf_conntrack_timestamp.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * (C) 2010 Pablo Neira Ayuso - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation (or any later at your option). 
- */ - -#include -#include -#include -#include - -#include -#include -#include - -static int nf_ct_tstamp __read_mostly; - -module_param_named(tstamp, nf_ct_tstamp, bool, 0644); -MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping."); - -#ifdef CONFIG_SYSCTL -static struct ctl_table tstamp_sysctl_table[] = { - { - .procname = "nf_conntrack_timestamp", - .data = &init_net.ct.sysctl_tstamp, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - {} -}; -#endif /* CONFIG_SYSCTL */ - -static struct nf_ct_ext_type tstamp_extend __read_mostly = { - .len = sizeof(struct nf_conn_tstamp), - .align = __alignof__(struct nf_conn_tstamp), - .id = NF_CT_EXT_TSTAMP, -}; - -#ifdef CONFIG_SYSCTL -static int nf_conntrack_tstamp_init_sysctl(struct net *net) -{ - struct ctl_table *table; - - table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table), - GFP_KERNEL); - if (!table) - goto out; - - table[0].data = &net->ct.sysctl_tstamp; - - net->ct.tstamp_sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); - if (!net->ct.tstamp_sysctl_header) { - printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n"); - goto out_register; - } - return 0; - -out_register: - kfree(table); -out: - return -ENOMEM; -} - -static void nf_conntrack_tstamp_fini_sysctl(struct net *net) -{ - struct ctl_table *table; - - table = net->ct.tstamp_sysctl_header->ctl_table_arg; - unregister_net_sysctl_table(net->ct.tstamp_sysctl_header); - kfree(table); -} -#else -static int nf_conntrack_tstamp_init_sysctl(struct net *net) -{ - return 0; -} - -static void nf_conntrack_tstamp_fini_sysctl(struct net *net) -{ -} -#endif - -int nf_conntrack_tstamp_init(struct net *net) -{ - int ret; - - net->ct.sysctl_tstamp = nf_ct_tstamp; - - if (net_eq(net, &init_net)) { - ret = nf_ct_extend_register(&tstamp_extend); - if (ret < 0) { - printk(KERN_ERR "nf_ct_tstamp: Unable to register " - "extension\n"); - goto out_extend_register; - } - } - - ret = nf_conntrack_tstamp_init_sysctl(net); - if (ret < 0) - goto out_sysctl; - - return 0; - -out_sysctl: - if (net_eq(net, &init_net)) - nf_ct_extend_unregister(&tstamp_extend); -out_extend_register: - return ret; -} - -void nf_conntrack_tstamp_fini(struct net *net) -{ - nf_conntrack_tstamp_fini_sysctl(net); - if (net_eq(net, &init_net)) - nf_ct_extend_unregister(&tstamp_extend); -} diff --git a/trunk/net/netfilter/nf_log.c b/trunk/net/netfilter/nf_log.c index 20c775cff2a8..b07393eab88e 100644 --- a/trunk/net/netfilter/nf_log.c +++ b/trunk/net/netfilter/nf_log.c @@ -161,8 +161,7 @@ static int seq_show(struct seq_file *s, void *v) struct nf_logger *t; int ret; - logger = rcu_dereference_protected(nf_loggers[*pos], - lockdep_is_held(&nf_log_mutex)); + logger = nf_loggers[*pos]; if (!logger) ret = seq_printf(s, "%2lld NONE (", *pos); @@ -250,8 +249,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write, mutex_unlock(&nf_log_mutex); } else { mutex_lock(&nf_log_mutex); - logger = rcu_dereference_protected(nf_loggers[tindex], - lockdep_is_held(&nf_log_mutex)); + logger = nf_loggers[tindex]; if (!logger) table->data = "NONE"; else diff --git a/trunk/net/netfilter/nf_queue.c b/trunk/net/netfilter/nf_queue.c index 5ab22e2bbd7d..74aebed5bd28 100644 --- a/trunk/net/netfilter/nf_queue.c +++ b/trunk/net/netfilter/nf_queue.c @@ -27,17 +27,14 @@ static DEFINE_MUTEX(queue_handler_mutex); int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) { int ret; - const struct nf_queue_handler *old; if (pf >= 
ARRAY_SIZE(queue_handler)) return -EINVAL; mutex_lock(&queue_handler_mutex); - old = rcu_dereference_protected(queue_handler[pf], - lockdep_is_held(&queue_handler_mutex)); - if (old == qh) + if (queue_handler[pf] == qh) ret = -EEXIST; - else if (old) + else if (queue_handler[pf]) ret = -EBUSY; else { rcu_assign_pointer(queue_handler[pf], qh); @@ -52,15 +49,11 @@ EXPORT_SYMBOL(nf_register_queue_handler); /* The caller must flush their queue before this */ int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) { - const struct nf_queue_handler *old; - if (pf >= ARRAY_SIZE(queue_handler)) return -EINVAL; mutex_lock(&queue_handler_mutex); - old = rcu_dereference_protected(queue_handler[pf], - lockdep_is_held(&queue_handler_mutex)); - if (old && old != qh) { + if (queue_handler[pf] && queue_handler[pf] != qh) { mutex_unlock(&queue_handler_mutex); return -EINVAL; } @@ -80,10 +73,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh) mutex_lock(&queue_handler_mutex); for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) { - if (rcu_dereference_protected( - queue_handler[pf], - lockdep_is_held(&queue_handler_mutex) - ) == qh) + if (queue_handler[pf] == qh) rcu_assign_pointer(queue_handler[pf], NULL); } mutex_unlock(&queue_handler_mutex); @@ -125,7 +115,7 @@ static int __nf_queue(struct sk_buff *skb, int (*okfn)(struct sk_buff *), unsigned int queuenum) { - int status = -ENOENT; + int status; struct nf_queue_entry *entry = NULL; #ifdef CONFIG_BRIDGE_NETFILTER struct net_device *physindev; @@ -138,20 +128,16 @@ static int __nf_queue(struct sk_buff *skb, rcu_read_lock(); qh = rcu_dereference(queue_handler[pf]); - if (!qh) { - status = -ESRCH; + if (!qh) goto err_unlock; - } afinfo = nf_get_afinfo(pf); if (!afinfo) goto err_unlock; entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC); - if (!entry) { - status = -ENOMEM; + if (!entry) goto err_unlock; - } *entry = (struct nf_queue_entry) { .skb = skb, @@ -165,9 +151,11 @@ static int __nf_queue(struct sk_buff *skb, /* If it's going away, ignore hook. */ if (!try_module_get(entry->elem->owner)) { - status = -ECANCELED; - goto err_unlock; + rcu_read_unlock(); + kfree(entry); + return 0; } + /* Bump dev refs so they don't vanish while packet is out */ if (indev) dev_hold(indev); @@ -194,13 +182,14 @@ static int __nf_queue(struct sk_buff *skb, goto err; } - return 0; + return 1; err_unlock: rcu_read_unlock(); err: + kfree_skb(skb); kfree(entry); - return status; + return 1; } int nf_queue(struct sk_buff *skb, @@ -212,8 +201,6 @@ int nf_queue(struct sk_buff *skb, unsigned int queuenum) { struct sk_buff *segs; - int err; - unsigned int queued; if (!skb_is_gso(skb)) return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn, @@ -229,35 +216,20 @@ int nf_queue(struct sk_buff *skb, } segs = skb_gso_segment(skb, 0); - /* Does not use PTR_ERR to limit the number of error codes that can be - * returned by nf_queue. For instance, callers rely on -ECANCELED to mean - * 'ignore this hook'. 
- */ + kfree_skb(skb); if (IS_ERR(segs)) - return -EINVAL; + return 1; - queued = 0; - err = 0; do { struct sk_buff *nskb = segs->next; segs->next = NULL; - if (err == 0) - err = __nf_queue(segs, elem, pf, hook, indev, - outdev, okfn, queuenum); - if (err == 0) - queued++; - else + if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn, + queuenum)) kfree_skb(segs); segs = nskb; } while (segs); - - /* also free orig skb if only some segments were queued */ - if (unlikely(err && queued)) - err = 0; - if (err == 0) - kfree_skb(skb); - return err; + return 1; } void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) @@ -265,7 +237,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) struct sk_buff *skb = entry->skb; struct list_head *elem = &entry->elem->list; const struct nf_afinfo *afinfo; - int err; rcu_read_lock(); @@ -299,17 +270,10 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) local_bh_enable(); break; case NF_QUEUE: - err = __nf_queue(skb, elem, entry->pf, entry->hook, - entry->indev, entry->outdev, entry->okfn, - verdict >> NF_VERDICT_QBITS); - if (err < 0) { - if (err == -ECANCELED) - goto next_hook; - if (err == -ESRCH && - (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) - goto next_hook; - kfree_skb(skb); - } + if (!__nf_queue(skb, elem, entry->pf, entry->hook, + entry->indev, entry->outdev, entry->okfn, + verdict >> NF_VERDICT_BITS)) + goto next_hook; break; case NF_STOLEN: default: diff --git a/trunk/net/netfilter/nfnetlink_log.c b/trunk/net/netfilter/nfnetlink_log.c index 91592da504b9..6a1572b0ab41 100644 --- a/trunk/net/netfilter/nfnetlink_log.c +++ b/trunk/net/netfilter/nfnetlink_log.c @@ -874,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st) for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { if (!hlist_empty(&instance_table[st->bucket])) - return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket])); + return rcu_dereference_bh(instance_table[st->bucket].first); } return NULL; } static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h) { - h = rcu_dereference_bh(hlist_next_rcu(h)); + h = rcu_dereference_bh(h->next); while (!h) { if (++st->bucket >= INSTANCE_BUCKETS) return NULL; - h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket])); + h = rcu_dereference_bh(instance_table[st->bucket].first); } return h; } diff --git a/trunk/net/netfilter/nfnetlink_queue.c b/trunk/net/netfilter/nfnetlink_queue.c index b83123f12b42..68e67d19724d 100644 --- a/trunk/net/netfilter/nfnetlink_queue.c +++ b/trunk/net/netfilter/nfnetlink_queue.c @@ -387,31 +387,25 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) { struct sk_buff *nskb; struct nfqnl_instance *queue; - int err = -ENOBUFS; + int err; /* rcu_read_lock()ed by nf_hook_slow() */ queue = instance_lookup(queuenum); - if (!queue) { - err = -ESRCH; + if (!queue) goto err_out; - } - if (queue->copy_mode == NFQNL_COPY_NONE) { - err = -EINVAL; + if (queue->copy_mode == NFQNL_COPY_NONE) goto err_out; - } nskb = nfqnl_build_packet_message(queue, entry); - if (nskb == NULL) { - err = -ENOMEM; + if (nskb == NULL) goto err_out; - } + spin_lock_bh(&queue->lock); - if (!queue->peer_pid) { - err = -EINVAL; + if (!queue->peer_pid) goto err_out_free_nskb; - } + if (queue->queue_total >= queue->queue_maxlen) { queue->queue_dropped++; if (net_ratelimit()) @@ -438,7 +432,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) err_out_unlock: 
spin_unlock_bh(&queue->lock); err_out: - return err; + return -1; } static int diff --git a/trunk/net/netfilter/x_tables.c b/trunk/net/netfilter/x_tables.c index 0a77d2ff2154..c94237631077 100644 --- a/trunk/net/netfilter/x_tables.c +++ b/trunk/net/netfilter/x_tables.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -39,8 +38,9 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) struct compat_delta { - unsigned int offset; /* offset in kernel */ - int delta; /* delta in 32bit user land */ + struct compat_delta *next; + unsigned int offset; + int delta; }; struct xt_af { @@ -49,9 +49,7 @@ struct xt_af { struct list_head target; #ifdef CONFIG_COMPAT struct mutex compat_mutex; - struct compat_delta *compat_tab; - unsigned int number; /* number of slots in compat_tab[] */ - unsigned int cur; /* number of used slots in compat_tab[] */ + struct compat_delta *compat_offsets; #endif }; @@ -416,66 +414,53 @@ int xt_check_match(struct xt_mtchk_param *par, EXPORT_SYMBOL_GPL(xt_check_match); #ifdef CONFIG_COMPAT -int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) +int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta) { - struct xt_af *xp = &xt[af]; + struct compat_delta *tmp; - if (!xp->compat_tab) { - if (!xp->number) - return -EINVAL; - xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); - if (!xp->compat_tab) - return -ENOMEM; - xp->cur = 0; - } + tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL); + if (!tmp) + return -ENOMEM; - if (xp->cur >= xp->number) - return -EINVAL; + tmp->offset = offset; + tmp->delta = delta; - if (xp->cur) - delta += xp->compat_tab[xp->cur - 1].delta; - xp->compat_tab[xp->cur].offset = offset; - xp->compat_tab[xp->cur].delta = delta; - xp->cur++; + if (xt[af].compat_offsets) { + tmp->next = xt[af].compat_offsets->next; + xt[af].compat_offsets->next = tmp; + } else { + xt[af].compat_offsets = tmp; + tmp->next = NULL; + } return 0; } EXPORT_SYMBOL_GPL(xt_compat_add_offset); void xt_compat_flush_offsets(u_int8_t af) { - if (xt[af].compat_tab) { - vfree(xt[af].compat_tab); - xt[af].compat_tab = NULL; - xt[af].number = 0; + struct compat_delta *tmp, *next; + + if (xt[af].compat_offsets) { + for (tmp = xt[af].compat_offsets; tmp; tmp = next) { + next = tmp->next; + kfree(tmp); + } + xt[af].compat_offsets = NULL; } } EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); int xt_compat_calc_jump(u_int8_t af, unsigned int offset) { - struct compat_delta *tmp = xt[af].compat_tab; - int mid, left = 0, right = xt[af].cur - 1; - - while (left <= right) { - mid = (left + right) >> 1; - if (offset > tmp[mid].offset) - left = mid + 1; - else if (offset < tmp[mid].offset) - right = mid - 1; - else - return mid ? 
tmp[mid - 1].delta : 0; - } - WARN_ON_ONCE(1); - return 0; -} -EXPORT_SYMBOL_GPL(xt_compat_calc_jump); + struct compat_delta *tmp; + int delta; -void xt_compat_init_offsets(u_int8_t af, unsigned int number) -{ - xt[af].number = number; - xt[af].cur = 0; + for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) + if (tmp->offset < offset) + delta += tmp->delta; + return delta; } -EXPORT_SYMBOL(xt_compat_init_offsets); +EXPORT_SYMBOL_GPL(xt_compat_calc_jump); int xt_compat_match_offset(const struct xt_match *match) { @@ -835,21 +820,6 @@ xt_replace_table(struct xt_table *table, */ local_bh_enable(); -#ifdef CONFIG_AUDIT - if (audit_enabled) { - struct audit_buffer *ab; - - ab = audit_log_start(current->audit_context, GFP_KERNEL, - AUDIT_NETFILTER_CFG); - if (ab) { - audit_log_format(ab, "table=%s family=%u entries=%u", - table->name, table->af, - private->number); - audit_log_end(ab); - } - } -#endif - return private; } EXPORT_SYMBOL_GPL(xt_replace_table); @@ -1368,7 +1338,7 @@ static int __init xt_init(void) mutex_init(&xt[i].mutex); #ifdef CONFIG_COMPAT mutex_init(&xt[i].compat_mutex); - xt[i].compat_tab = NULL; + xt[i].compat_offsets = NULL; #endif INIT_LIST_HEAD(&xt[i].target); INIT_LIST_HEAD(&xt[i].match); diff --git a/trunk/net/netfilter/xt_AUDIT.c b/trunk/net/netfilter/xt_AUDIT.c deleted file mode 100644 index 81802d27346e..000000000000 --- a/trunk/net/netfilter/xt_AUDIT.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Creates audit record for dropped/accepted packets - * - * (C) 2010-2011 Thomas Graf - * (C) 2010-2011 Red Hat, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Thomas Graf "); -MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets"); -MODULE_ALIAS("ipt_AUDIT"); -MODULE_ALIAS("ip6t_AUDIT"); -MODULE_ALIAS("ebt_AUDIT"); -MODULE_ALIAS("arpt_AUDIT"); - -static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb, - unsigned int proto, unsigned int offset) -{ - switch (proto) { - case IPPROTO_TCP: - case IPPROTO_UDP: - case IPPROTO_UDPLITE: { - const __be16 *pptr; - __be16 _ports[2]; - - pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports); - if (pptr == NULL) { - audit_log_format(ab, " truncated=1"); - return; - } - - audit_log_format(ab, " sport=%hu dport=%hu", - ntohs(pptr[0]), ntohs(pptr[1])); - } - break; - - case IPPROTO_ICMP: - case IPPROTO_ICMPV6: { - const u8 *iptr; - u8 _ih[2]; - - iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih); - if (iptr == NULL) { - audit_log_format(ab, " truncated=1"); - return; - } - - audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu", - iptr[0], iptr[1]); - - } - break; - } -} - -static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb) -{ - struct iphdr _iph; - const struct iphdr *ih; - - ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); - if (!ih) { - audit_log_format(ab, " truncated=1"); - return; - } - - audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu", - &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol); - - if (ntohs(ih->frag_off) & IP_OFFSET) { - audit_log_format(ab, " frag=1"); - return; - } - - audit_proto(ab, skb, ih->protocol, ih->ihl * 4); -} - -static void audit_ip6(struct audit_buffer *ab, struct 
sk_buff *skb) -{ - struct ipv6hdr _ip6h; - const struct ipv6hdr *ih; - u8 nexthdr; - int offset; - - ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h); - if (!ih) { - audit_log_format(ab, " truncated=1"); - return; - } - - nexthdr = ih->nexthdr; - offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), - &nexthdr); - - audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu", - &ih->saddr, &ih->daddr, nexthdr); - - if (offset) - audit_proto(ab, skb, nexthdr, offset); -} - -static unsigned int -audit_tg(struct sk_buff *skb, const struct xt_action_param *par) -{ - const struct xt_audit_info *info = par->targinfo; - struct audit_buffer *ab; - - ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); - if (ab == NULL) - goto errout; - - audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s", - info->type, par->hooknum, skb->len, - par->in ? par->in->name : "?", - par->out ? par->out->name : "?"); - - if (skb->mark) - audit_log_format(ab, " mark=%#x", skb->mark); - - if (skb->dev && skb->dev->type == ARPHRD_ETHER) { - audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x", - eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, - ntohs(eth_hdr(skb)->h_proto)); - - if (par->family == NFPROTO_BRIDGE) { - switch (eth_hdr(skb)->h_proto) { - case __constant_htons(ETH_P_IP): - audit_ip4(ab, skb); - break; - - case __constant_htons(ETH_P_IPV6): - audit_ip6(ab, skb); - break; - } - } - } - - switch (par->family) { - case NFPROTO_IPV4: - audit_ip4(ab, skb); - break; - - case NFPROTO_IPV6: - audit_ip6(ab, skb); - break; - } - - audit_log_end(ab); - -errout: - return XT_CONTINUE; -} - -static int audit_tg_check(const struct xt_tgchk_param *par) -{ - const struct xt_audit_info *info = par->targinfo; - - if (info->type > XT_AUDIT_TYPE_MAX) { - pr_info("Audit type out of range (valid range: 0..%hhu)\n", - XT_AUDIT_TYPE_MAX); - return -ERANGE; - } - - return 0; -} - -static struct xt_target audit_tg_reg __read_mostly = { - .name = "AUDIT", - .family = NFPROTO_UNSPEC, - .target = audit_tg, - .targetsize = sizeof(struct xt_audit_info), - .checkentry = audit_tg_check, - .me = THIS_MODULE, -}; - -static int __init audit_tg_init(void) -{ - return xt_register_target(&audit_tg_reg); -} - -static void __exit audit_tg_exit(void) -{ - xt_unregister_target(&audit_tg_reg); -} - -module_init(audit_tg_init); -module_exit(audit_tg_exit); diff --git a/trunk/net/netfilter/xt_CLASSIFY.c b/trunk/net/netfilter/xt_CLASSIFY.c index af9c4dadf816..c2c0e4abeb99 100644 --- a/trunk/net/netfilter/xt_CLASSIFY.c +++ b/trunk/net/netfilter/xt_CLASSIFY.c @@ -19,14 +19,12 @@ #include #include #include -#include MODULE_AUTHOR("Patrick McHardy "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xtables: Qdisc classification"); MODULE_ALIAS("ipt_CLASSIFY"); MODULE_ALIAS("ip6t_CLASSIFY"); -MODULE_ALIAS("arpt_CLASSIFY"); static unsigned int classify_tg(struct sk_buff *skb, const struct xt_action_param *par) @@ -37,36 +35,26 @@ classify_tg(struct sk_buff *skb, const struct xt_action_param *par) return XT_CONTINUE; } -static struct xt_target classify_tg_reg[] __read_mostly = { - { - .name = "CLASSIFY", - .revision = 0, - .family = NFPROTO_UNSPEC, - .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) | - (1 << NF_INET_POST_ROUTING), - .target = classify_tg, - .targetsize = sizeof(struct xt_classify_target_info), - .me = THIS_MODULE, - }, - { - .name = "CLASSIFY", - .revision = 0, - .family = NFPROTO_ARP, - .hooks = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD), - .target = classify_tg, - 
.targetsize = sizeof(struct xt_classify_target_info), - .me = THIS_MODULE, - }, +static struct xt_target classify_tg_reg __read_mostly = { + .name = "CLASSIFY", + .revision = 0, + .family = NFPROTO_UNSPEC, + .table = "mangle", + .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) | + (1 << NF_INET_POST_ROUTING), + .target = classify_tg, + .targetsize = sizeof(struct xt_classify_target_info), + .me = THIS_MODULE, }; static int __init classify_tg_init(void) { - return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg)); + return xt_register_target(&classify_tg_reg); } static void __exit classify_tg_exit(void) { - xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg)); + xt_unregister_target(&classify_tg_reg); } module_init(classify_tg_init); diff --git a/trunk/net/netfilter/xt_IDLETIMER.c b/trunk/net/netfilter/xt_IDLETIMER.c index 3bdd443aaf15..be1f22e13545 100644 --- a/trunk/net/netfilter/xt_IDLETIMER.c +++ b/trunk/net/netfilter/xt_IDLETIMER.c @@ -313,5 +313,3 @@ MODULE_AUTHOR("Timo Teras "); MODULE_AUTHOR("Luciano Coelho "); MODULE_DESCRIPTION("Xtables: idle time monitor"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ipt_IDLETIMER"); -MODULE_ALIAS("ip6t_IDLETIMER"); diff --git a/trunk/net/netfilter/xt_LED.c b/trunk/net/netfilter/xt_LED.c index 993de2ba89d3..a4140509eea1 100644 --- a/trunk/net/netfilter/xt_LED.c +++ b/trunk/net/netfilter/xt_LED.c @@ -31,8 +31,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adam Nielsen "); MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match"); -MODULE_ALIAS("ipt_LED"); -MODULE_ALIAS("ip6t_LED"); static LIST_HEAD(xt_led_triggers); static DEFINE_MUTEX(xt_led_mutex); diff --git a/trunk/net/netfilter/xt_NFQUEUE.c b/trunk/net/netfilter/xt_NFQUEUE.c index d4f4b5d66b20..039cce1bde3d 100644 --- a/trunk/net/netfilter/xt_NFQUEUE.c +++ b/trunk/net/netfilter/xt_NFQUEUE.c @@ -72,31 +72,18 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par) if (info->queues_total > 1) { if (par->family == NFPROTO_IPV4) - queue = (((u64) hash_v4(skb) * info->queues_total) >> - 32) + queue; + queue = hash_v4(skb) % info->queues_total + queue; #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) else if (par->family == NFPROTO_IPV6) - queue = (((u64) hash_v6(skb) * info->queues_total) >> - 32) + queue; + queue = hash_v6(skb) % info->queues_total + queue; #endif } return NF_QUEUE_NR(queue); } -static unsigned int -nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par) -{ - const struct xt_NFQ_info_v2 *info = par->targinfo; - unsigned int ret = nfqueue_tg_v1(skb, par); - - if (info->bypass) - ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; - return ret; -} - -static int nfqueue_tg_check(const struct xt_tgchk_param *par) +static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par) { - const struct xt_NFQ_info_v2 *info = par->targinfo; + const struct xt_NFQ_info_v1 *info = par->targinfo; u32 maxid; if (unlikely(!rnd_inited)) { @@ -113,8 +100,6 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par) info->queues_total, maxid); return -ERANGE; } - if (par->target->revision == 2 && info->bypass > 1) - return -EINVAL; return 0; } @@ -130,20 +115,11 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = { .name = "NFQUEUE", .revision = 1, .family = NFPROTO_UNSPEC, - .checkentry = nfqueue_tg_check, + .checkentry = nfqueue_tg_v1_check, .target = nfqueue_tg_v1, .targetsize = sizeof(struct xt_NFQ_info_v1), .me = THIS_MODULE, }, - { - .name = "NFQUEUE", - .revision = 2, - .family = NFPROTO_UNSPEC, 
- .checkentry = nfqueue_tg_check, - .target = nfqueue_tg_v2, - .targetsize = sizeof(struct xt_NFQ_info_v2), - .me = THIS_MODULE, - }, }; static int __init nfqueue_tg_init(void) diff --git a/trunk/net/netfilter/xt_connlimit.c b/trunk/net/netfilter/xt_connlimit.c index e029c4807404..5c5b6b921b84 100644 --- a/trunk/net/netfilter/xt_connlimit.c +++ b/trunk/net/netfilter/xt_connlimit.c @@ -185,24 +185,18 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) int connections; ct = nf_ct_get(skb, &ctinfo); - if (ct != NULL) { - if (info->flags & XT_CONNLIMIT_DADDR) - tuple_ptr = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; - else - tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; - } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), - par->family, &tuple)) { + if (ct != NULL) + tuple_ptr = &ct->tuplehash[0].tuple; + else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), + par->family, &tuple)) goto hotdrop; - } if (par->family == NFPROTO_IPV6) { const struct ipv6hdr *iph = ipv6_hdr(skb); - memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ? - &iph->daddr : &iph->saddr, sizeof(addr.ip6)); + memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr)); } else { const struct iphdr *iph = ip_hdr(skb); - addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ? - iph->daddr : iph->saddr; + addr.ip = iph->saddr; } spin_lock_bh(&info->data->lock); @@ -210,12 +204,13 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) &info->mask, par->family); spin_unlock_bh(&info->data->lock); - if (connections < 0) + if (connections < 0) { /* kmalloc failed, drop it entirely */ - goto hotdrop; + par->hotdrop = true; + return false; + } - return (connections > info->limit) ^ - !!(info->flags & XT_CONNLIMIT_INVERT); + return (connections > info->limit) ^ info->inverse; hotdrop: par->hotdrop = true; @@ -273,38 +268,25 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) kfree(info->data); } -static struct xt_match connlimit_mt_reg[] __read_mostly = { - { - .name = "connlimit", - .revision = 0, - .family = NFPROTO_UNSPEC, - .checkentry = connlimit_mt_check, - .match = connlimit_mt, - .matchsize = sizeof(struct xt_connlimit_info), - .destroy = connlimit_mt_destroy, - .me = THIS_MODULE, - }, - { - .name = "connlimit", - .revision = 1, - .family = NFPROTO_UNSPEC, - .checkentry = connlimit_mt_check, - .match = connlimit_mt, - .matchsize = sizeof(struct xt_connlimit_info), - .destroy = connlimit_mt_destroy, - .me = THIS_MODULE, - }, +static struct xt_match connlimit_mt_reg __read_mostly = { + .name = "connlimit", + .revision = 0, + .family = NFPROTO_UNSPEC, + .checkentry = connlimit_mt_check, + .match = connlimit_mt, + .matchsize = sizeof(struct xt_connlimit_info), + .destroy = connlimit_mt_destroy, + .me = THIS_MODULE, }; static int __init connlimit_mt_init(void) { - return xt_register_matches(connlimit_mt_reg, - ARRAY_SIZE(connlimit_mt_reg)); + return xt_register_match(&connlimit_mt_reg); } static void __exit connlimit_mt_exit(void) { - xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg)); + xt_unregister_match(&connlimit_mt_reg); } module_init(connlimit_mt_init); diff --git a/trunk/net/netfilter/xt_conntrack.c b/trunk/net/netfilter/xt_conntrack.c index 4ef1b63ad73f..e536710ad916 100644 --- a/trunk/net/netfilter/xt_conntrack.c +++ b/trunk/net/netfilter/xt_conntrack.c @@ -112,54 +112,6 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info, return true; } -static inline bool -port_match(u16 min, u16 max, u16 port, bool invert) -{ - return (port >= min && 
port <= max) ^ invert; -} - -static inline bool -ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info, - const struct nf_conn *ct) -{ - const struct nf_conntrack_tuple *tuple; - - tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; - if ((info->match_flags & XT_CONNTRACK_PROTO) && - (nf_ct_protonum(ct) == info->l4proto) ^ - !(info->invert_flags & XT_CONNTRACK_PROTO)) - return false; - - /* Shortcut to match all recognized protocols by using ->src.all. */ - if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) && - !port_match(info->origsrc_port, info->origsrc_port_high, - ntohs(tuple->src.u.all), - info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT)) - return false; - - if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) && - !port_match(info->origdst_port, info->origdst_port_high, - ntohs(tuple->dst.u.all), - info->invert_flags & XT_CONNTRACK_ORIGDST_PORT)) - return false; - - tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; - - if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) && - !port_match(info->replsrc_port, info->replsrc_port_high, - ntohs(tuple->src.u.all), - info->invert_flags & XT_CONNTRACK_REPLSRC_PORT)) - return false; - - if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) && - !port_match(info->repldst_port, info->repldst_port_high, - ntohs(tuple->dst.u.all), - info->invert_flags & XT_CONNTRACK_REPLDST_PORT)) - return false; - - return true; -} - static bool conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, u16 state_mask, u16 status_mask) @@ -218,13 +170,8 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, !(info->invert_flags & XT_CONNTRACK_REPLDST)) return false; - if (par->match->revision != 3) { - if (!ct_proto_port_check(info, ct)) - return false; - } else { - if (!ct_proto_port_check_v3(par->matchinfo, ct)) - return false; - } + if (!ct_proto_port_check(info, ct)) + return false; if ((info->match_flags & XT_CONNTRACK_STATUS) && (!!(status_mask & ct->status) ^ @@ -260,14 +207,6 @@ conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par) return conntrack_mt(skb, par, info->state_mask, info->status_mask); } -static bool -conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par) -{ - const struct xt_conntrack_mtinfo3 *info = par->matchinfo; - - return conntrack_mt(skb, par, info->state_mask, info->status_mask); -} - static int conntrack_mt_check(const struct xt_mtchk_param *par) { int ret; @@ -305,16 +244,6 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = { .destroy = conntrack_mt_destroy, .me = THIS_MODULE, }, - { - .name = "conntrack", - .revision = 3, - .family = NFPROTO_UNSPEC, - .matchsize = sizeof(struct xt_conntrack_mtinfo3), - .match = conntrack_mt_v3, - .checkentry = conntrack_mt_check, - .destroy = conntrack_mt_destroy, - .me = THIS_MODULE, - }, }; static int __init conntrack_mt_init(void) diff --git a/trunk/net/netfilter/xt_cpu.c b/trunk/net/netfilter/xt_cpu.c index c7a2e5466bc4..b39db8a5cbae 100644 --- a/trunk/net/netfilter/xt_cpu.c +++ b/trunk/net/netfilter/xt_cpu.c @@ -22,8 +22,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Eric Dumazet "); MODULE_DESCRIPTION("Xtables: CPU match"); -MODULE_ALIAS("ipt_cpu"); -MODULE_ALIAS("ip6t_cpu"); static int cpu_mt_check(const struct xt_mtchk_param *par) { diff --git a/trunk/net/netfilter/xt_iprange.c b/trunk/net/netfilter/xt_iprange.c index d3eb5ed1892f..88f7c3511c72 100644 --- a/trunk/net/netfilter/xt_iprange.c +++ b/trunk/net/netfilter/xt_iprange.c @@ -31,7 +31,7 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par) 
pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n", &iph->saddr, (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "", - &info->src_min.ip, + &info->src_max.ip, &info->src_max.ip); return false; } @@ -78,27 +78,15 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par) m = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0; m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0; m ^= !!(info->flags & IPRANGE_SRC_INV); - if (m) { - pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n", - &iph->saddr, - (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "", - &info->src_min.in6, - &info->src_max.in6); + if (m) return false; - } } if (info->flags & IPRANGE_DST) { m = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0; m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0; m ^= !!(info->flags & IPRANGE_DST_INV); - if (m) { - pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n", - &iph->daddr, - (info->flags & IPRANGE_DST_INV) ? "(INV) " : "", - &info->dst_min.in6, - &info->dst_max.in6); + if (m) return false; - } } return true; } diff --git a/trunk/net/netfilter/xt_ipvs.c b/trunk/net/netfilter/xt_ipvs.c index bb10b0717f1b..9127a3d8aa35 100644 --- a/trunk/net/netfilter/xt_ipvs.c +++ b/trunk/net/netfilter/xt_ipvs.c @@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par) /* * Check if the packet belongs to an existing entry */ - cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */); + cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */); if (unlikely(cp == NULL)) { match = false; goto out; diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index c60649ec1193..91cb1d71f018 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -164,6 +164,7 @@ struct packet_mreq_max { static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing, int tx_ring); +#define PGV_FROM_VMALLOC 1 struct pgv { char *buffer; }; @@ -522,11 +523,11 @@ static inline unsigned int run_filter(const struct sk_buff *skb, { struct sk_filter *filter; - rcu_read_lock(); - filter = rcu_dereference(sk->sk_filter); + rcu_read_lock_bh(); + filter = rcu_dereference_bh(sk->sk_filter); if (filter != NULL) res = sk_run_filter(skb, filter->insns); - rcu_read_unlock(); + rcu_read_unlock_bh(); return res; } diff --git a/trunk/net/rds/rds.h b/trunk/net/rds/rds.h index da8adac2bf06..9542449c0720 100644 --- a/trunk/net/rds/rds.h +++ b/trunk/net/rds/rds.h @@ -50,6 +50,7 @@ rdsdebug(char *fmt, ...) #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) #define RDS_CONG_MAP_BYTES (65536 / 8) +#define RDS_CONG_MAP_LONGS (RDS_CONG_MAP_BYTES / sizeof(unsigned long)) #define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE) #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8) diff --git a/trunk/net/sched/Kconfig b/trunk/net/sched/Kconfig index e318f458713e..f04d4a484d53 100644 --- a/trunk/net/sched/Kconfig +++ b/trunk/net/sched/Kconfig @@ -205,18 +205,6 @@ config NET_SCH_DRR If unsure, say N. -config NET_SCH_MQPRIO - tristate "Multi-queue priority scheduler (MQPRIO)" - help - Say Y here if you want to use the Multi-queue Priority scheduler. - This scheduler allows QOS to be offloaded on NICs that have support - for offloading QOS schedulers. - - To compile this driver as a module, choose M here: the module will - be called sch_mqprio. - - If unsure, say N. 
- config NET_SCH_INGRESS tristate "Ingress Qdisc" depends on NET_CLS_ACT @@ -255,7 +243,7 @@ config NET_CLS_TCINDEX config NET_CLS_ROUTE4 tristate "Routing decision (ROUTE)" - select IP_ROUTE_CLASSID + select NET_CLS_ROUTE select NET_CLS ---help--- If you say Y here, you will be able to classify packets @@ -264,6 +252,9 @@ config NET_CLS_ROUTE4 To compile this code as a module, choose M here: the module will be called cls_route. +config NET_CLS_ROUTE + bool + config NET_CLS_FW tristate "Netfilter mark (FW)" select NET_CLS diff --git a/trunk/net/sched/Makefile b/trunk/net/sched/Makefile index 26ce681a2c60..960f5dba6304 100644 --- a/trunk/net/sched/Makefile +++ b/trunk/net/sched/Makefile @@ -32,7 +32,6 @@ obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o -obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o obj-$(CONFIG_NET_CLS_FW) += cls_fw.o diff --git a/trunk/net/sched/act_api.c b/trunk/net/sched/act_api.c index 15873e14cb54..23b25f89e7e0 100644 --- a/trunk/net/sched/act_api.c +++ b/trunk/net/sched/act_api.c @@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, struct tc_action *a, struct tcf_hashinfo *hinfo) { struct tcf_common *p; - int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; + int err = 0, index = -1,i = 0, s_i = 0, n_i = 0; struct nlattr *nest; read_lock_bh(hinfo->lock); @@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, { struct tcf_common *p, *s_p; struct nlattr *nest; - int i = 0, n_i = 0; + int i= 0, n_i = 0; nest = nla_nest_start(skb, a->order); if (nest == NULL) @@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, while (p != NULL) { s_p = p->tcfc_next; if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) - module_put(a->ops->owner); + module_put(a->ops->owner); n_i++; p = s_p; } @@ -447,8 +447,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; - err = tcf_action_dump_old(skb, a, bind, ref); - if (err > 0) { + if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) { nla_nest_end(skb, nest); return err; } @@ -492,7 +491,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, struct tc_action *a; struct tc_action_ops *a_o; char act_name[IFNAMSIZ]; - struct nlattr *tb[TCA_ACT_MAX + 1]; + struct nlattr *tb[TCA_ACT_MAX+1]; struct nlattr *kind; int err; @@ -550,9 +549,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, goto err_free; /* module count goes up only when brand new policy is created - * if it exists and is only bound to in a_o->init() then - * ACT_P_CREATED is not returned (a zero is). - */ + if it exists and is only bound to in a_o->init() then + ACT_P_CREATED is not returned (a zero is). 
+ */ if (err != ACT_P_CREATED) module_put(a_o->owner); a->ops = a_o; @@ -570,7 +569,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) { - struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; + struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; struct tc_action *head = NULL, *act, *act_prev = NULL; int err; int i; @@ -698,7 +697,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, static struct tc_action * tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid) { - struct nlattr *tb[TCA_ACT_MAX + 1]; + struct nlattr *tb[TCA_ACT_MAX+1]; struct tc_action *a; int index; int err; @@ -771,7 +770,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, struct tcamsg *t; struct netlink_callback dcb; struct nlattr *nest; - struct nlattr *tb[TCA_ACT_MAX + 1]; + struct nlattr *tb[TCA_ACT_MAX+1]; struct nlattr *kind; struct tc_action *a = create_a(0); int err = -ENOMEM; @@ -822,8 +821,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, nlh->nlmsg_flags |= NLM_F_ROOT; module_put(a->ops->owner); kfree(a); - err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, - n->nlmsg_flags & NLM_F_ECHO); + err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); if (err > 0) return 0; @@ -844,14 +842,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) { int i, ret; - struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; + struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; struct tc_action *head = NULL, *act, *act_prev = NULL; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (ret < 0) return ret; - if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { + if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { if (tb[1] != NULL) return tca_action_flush(net, tb[1], n, pid); else @@ -894,7 +892,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, /* now do the delete */ tcf_action_destroy(head, 0); ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, - n->nlmsg_flags & NLM_F_ECHO); + n->nlmsg_flags&NLM_F_ECHO); if (ret > 0) return 0; return ret; @@ -938,7 +936,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a, nlh->nlmsg_len = skb_tail_pointer(skb) - b; NETLINK_CB(skb).dst_group = RTNLGRP_TC; - err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO); + err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); if (err > 0) err = 0; return err; @@ -969,7 +967,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, /* dump then free all the actions after update; inserted policy * stays intact - */ + * */ ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); for (a = act; a; a = act) { act = a->next; @@ -995,7 +993,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) return -EINVAL; } - /* n->nlmsg_flags & NLM_F_CREATE */ + /* n->nlmsg_flags&NLM_F_CREATE + * */ switch (n->nlmsg_type) { case RTM_NEWACTION: /* we are going to assume all other flags @@ -1004,7 +1003,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) * but since we want avoid ambiguity (eg when flags * is zero) then just set this */ - if (n->nlmsg_flags & NLM_F_REPLACE) + if (n->nlmsg_flags&NLM_F_REPLACE) ovr = 1; replay: ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); @@ -1029,7 +1028,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) static struct 
nlattr * find_dump_kind(const struct nlmsghdr *n) { - struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; + struct nlattr *tb1, *tb2[TCA_ACT_MAX+1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct nlattr *nla[TCAA_MAX + 1]; struct nlattr *kind; @@ -1072,8 +1071,9 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) } a_o = tc_lookup_action(kind); - if (a_o == NULL) + if (a_o == NULL) { return 0; + } memset(&a, 0, sizeof(struct tc_action)); a.ops = a_o; diff --git a/trunk/net/sched/act_csum.c b/trunk/net/sched/act_csum.c index 6cdf9abe475f..83ddfc07e45d 100644 --- a/trunk/net/sched/act_csum.c +++ b/trunk/net/sched/act_csum.c @@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy); + err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy); if (err < 0) return err; diff --git a/trunk/net/sched/act_gact.c b/trunk/net/sched/act_gact.c index 2b4ab4b05ce8..c2ed90a4c0b4 100644 --- a/trunk/net/sched/act_gact.c +++ b/trunk/net/sched/act_gact.c @@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact) } typedef int (*g_rand)(struct tcf_gact *gact); -static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ }; +static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ }; #endif /* CONFIG_GACT_PROB */ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { @@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind, &gact_idx_gen, &gact_hash_info); if (IS_ERR(pc)) - return PTR_ERR(pc); + return PTR_ERR(pc); ret = ACT_P_CREATED; } else { if (!ovr) { @@ -205,9 +205,9 @@ MODULE_LICENSE("GPL"); static int __init gact_init_module(void) { #ifdef CONFIG_GACT_PROB - pr_info("GACT probability on\n"); + printk(KERN_INFO "GACT probability on\n"); #else - pr_info("GACT probability NOT on\n"); + printk(KERN_INFO "GACT probability NOT on\n"); #endif return tcf_register_action(&act_gact_ops); } diff --git a/trunk/net/sched/act_ipt.c b/trunk/net/sched/act_ipt.c index 9fc211a1b20e..c2a7c20e81c1 100644 --- a/trunk/net/sched/act_ipt.c +++ b/trunk/net/sched/act_ipt.c @@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind, &ipt_idx_gen, &ipt_hash_info); if (IS_ERR(pc)) - return PTR_ERR(pc); + return PTR_ERR(pc); ret = ACT_P_CREATED; } else { if (!ovr) { @@ -162,8 +162,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, if (unlikely(!t)) goto err2; - err = ipt_init_target(t, tname, hook); - if (err < 0) + if ((err = ipt_init_target(t, tname, hook)) < 0) goto err3; spin_lock_bh(&ipt->tcf_lock); @@ -213,9 +212,8 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, bstats_update(&ipt->tcf_bstats, skb); /* yes, we have to worry about both in and out dev - * worry later - danger - this API seems to have changed - * from earlier kernels - */ + worry later - danger - this API seems to have changed + from earlier kernels */ par.in = skb->dev; par.out = NULL; par.hooknum = ipt->tcfi_hook; @@ -255,9 +253,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int struct tc_cnt c; /* for simple targets kernel size == user size - * user name = target name - * for foolproof you need to not assume this - */ + ** user name = target name + ** for foolproof you need to not assume this + */ t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, 
GFP_ATOMIC); if (unlikely(!t)) diff --git a/trunk/net/sched/act_mirred.c b/trunk/net/sched/act_mirred.c index 961386e2f2c0..d765067e99db 100644 --- a/trunk/net/sched/act_mirred.c +++ b/trunk/net/sched/act_mirred.c @@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = { .lock = &mirred_lock, }; -static int tcf_mirred_release(struct tcf_mirred *m, int bind) +static inline int tcf_mirred_release(struct tcf_mirred *m, int bind) { if (m) { if (bind) m->tcf_bindcnt--; m->tcf_refcnt--; - if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) { + if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { list_del(&m->tcfm_list); if (m->tcfm_dev) dev_put(m->tcfm_dev); diff --git a/trunk/net/sched/act_nat.c b/trunk/net/sched/act_nat.c index 762b027650a9..178a4bd7b7cb 100644 --- a/trunk/net/sched/act_nat.c +++ b/trunk/net/sched/act_nat.c @@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est, pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, &nat_idx_gen, &nat_hash_info); if (IS_ERR(pc)) - return PTR_ERR(pc); + return PTR_ERR(pc); p = to_tcf_nat(pc); ret = ACT_P_CREATED; } else { diff --git a/trunk/net/sched/act_pedit.c b/trunk/net/sched/act_pedit.c index 50c7c06c019d..445bef716f77 100644 --- a/trunk/net/sched/act_pedit.c +++ b/trunk/net/sched/act_pedit.c @@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, &pedit_idx_gen, &pedit_hash_info); if (IS_ERR(pc)) - return PTR_ERR(pc); + return PTR_ERR(pc); p = to_pedit(pc); keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) { @@ -127,9 +127,11 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, int i, munged = 0; unsigned int off; - if (skb_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - return p->tcf_action; + if (skb_cloned(skb)) { + if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { + return p->tcf_action; + } + } off = skb_network_offset(skb); diff --git a/trunk/net/sched/act_police.c b/trunk/net/sched/act_police.c index 8a1630774fd6..e2f08b1e2e58 100644 --- a/trunk/net/sched/act_police.c +++ b/trunk/net/sched/act_police.c @@ -22,8 +22,8 @@ #include #include -#define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L) -#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L) +#define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L) +#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L) #define POL_TAB_MASK 15 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; @@ -37,7 +37,8 @@ static struct tcf_hashinfo police_hash_info = { }; /* old policer structure from before tc actions */ -struct tc_police_compat { +struct tc_police_compat +{ u32 index; int action; u32 limit; @@ -138,7 +139,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind) { - unsigned int h; + unsigned h; int ret = 0, err; struct nlattr *tb[TCA_POLICE_MAX + 1]; struct tc_police *parm; diff --git a/trunk/net/sched/act_simple.c b/trunk/net/sched/act_simple.c index a34a22de60b3..7287cff7af3e 100644 --- a/trunk/net/sched/act_simple.c +++ b/trunk/net/sched/act_simple.c @@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result /* print policy string followed by _ then packet count * Example if this was the 3rd packet and the string was "hello" * then it would look like "hello_3" (without quotes) - */ + **/ pr_info("simple: %s_%d\n", (char *)d->tcfd_defdata, d->tcf_bstats.packets); spin_unlock(&d->tcf_lock); @@ -125,7 
+125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, &simp_idx_gen, &simp_hash_info); if (IS_ERR(pc)) - return PTR_ERR(pc); + return PTR_ERR(pc); d = to_defact(pc); ret = alloc_defdata(d, defdata); @@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, return ret; } -static int tcf_simp_cleanup(struct tc_action *a, int bind) +static inline int tcf_simp_cleanup(struct tc_action *a, int bind) { struct tcf_defact *d = a->priv; @@ -158,8 +158,8 @@ static int tcf_simp_cleanup(struct tc_action *a, int bind) return 0; } -static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, - int bind, int ref) +static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, + int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_defact *d = a->priv; diff --git a/trunk/net/sched/act_skbedit.c b/trunk/net/sched/act_skbedit.c index 5f6f0c7c3905..836f5fee9e58 100644 --- a/trunk/net/sched/act_skbedit.c +++ b/trunk/net/sched/act_skbedit.c @@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, &skbedit_idx_gen, &skbedit_hash_info); if (IS_ERR(pc)) - return PTR_ERR(pc); + return PTR_ERR(pc); d = to_skbedit(pc); ret = ACT_P_CREATED; @@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, return ret; } -static int tcf_skbedit_cleanup(struct tc_action *a, int bind) +static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind) { struct tcf_skbedit *d = a->priv; @@ -153,8 +153,8 @@ static int tcf_skbedit_cleanup(struct tc_action *a, int bind) return 0; } -static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, - int bind, int ref) +static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, + int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_skbedit *d = a->priv; diff --git a/trunk/net/sched/cls_api.c b/trunk/net/sched/cls_api.c index bb2c523f8158..5fd0c28ef79a 100644 --- a/trunk/net/sched/cls_api.c +++ b/trunk/net/sched/cls_api.c @@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) int rc = -ENOENT; write_lock(&cls_mod_lock); - for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next) + for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) if (t == ops) break; @@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp) u32 first = TC_H_MAKE(0xC0000000U, 0U); if (tp) - first = tp->prio - 1; + first = tp->prio-1; return first; } @@ -149,8 +149,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) if (prio == 0) { /* If no priority is given, user wants we allocated it. */ - if (n->nlmsg_type != RTM_NEWTFILTER || - !(n->nlmsg_flags & NLM_F_CREATE)) + if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) return -ENOENT; prio = TC_H_MAKE(0x80000000U, 0U); } @@ -177,8 +176,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) } /* Is it classful? 
*/ - cops = q->ops->cl_ops; - if (!cops) + if ((cops = q->ops->cl_ops) == NULL) return -EINVAL; if (cops->tcf_chain == NULL) @@ -198,11 +196,10 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) goto errout; /* Check the chain for existence of proto-tcf with this priority */ - for (back = chain; (tp = *back) != NULL; back = &tp->next) { + for (back = chain; (tp=*back) != NULL; back = &tp->next) { if (tp->prio >= prio) { if (tp->prio == prio) { - if (!nprio || - (tp->protocol != protocol && protocol)) + if (!nprio || (tp->protocol != protocol && protocol)) goto errout; } else tp = NULL; @@ -219,8 +216,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) goto errout; err = -ENOENT; - if (n->nlmsg_type != RTM_NEWTFILTER || - !(n->nlmsg_flags & NLM_F_CREATE)) + if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) goto errout; @@ -424,8 +420,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) return skb->len; - dev = __dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) + if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) return skb->len; if (!tcm->tcm_parent) @@ -434,8 +429,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); if (!q) goto out; - cops = q->ops->cl_ops; - if (!cops) + if ((cops = q->ops->cl_ops) == NULL) goto errout; if (cops->tcf_chain == NULL) goto errout; @@ -450,9 +444,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; - for (tp = *chain, t = 0; tp; tp = tp->next, t++) { - if (t < s_t) - continue; + for (tp=*chain, t=0; tp; tp = tp->next, t++) { + if (t < s_t) continue; if (TC_H_MAJ(tcm->tcm_info) && TC_H_MAJ(tcm->tcm_info) != tp->prio) continue; @@ -475,10 +468,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) arg.skb = skb; arg.cb = cb; arg.w.stop = 0; - arg.w.skip = cb->args[1] - 1; + arg.w.skip = cb->args[1]-1; arg.w.count = 0; tp->ops->walk(tp, &arg.w); - cb->args[1] = arg.w.count + 1; + cb->args[1] = arg.w.count+1; if (arg.w.stop) break; } diff --git a/trunk/net/sched/cls_basic.c b/trunk/net/sched/cls_basic.c index 8be8872dd571..f23d9155b1ef 100644 --- a/trunk/net/sched/cls_basic.c +++ b/trunk/net/sched/cls_basic.c @@ -21,12 +21,14 @@ #include #include -struct basic_head { +struct basic_head +{ u32 hgenerator; struct list_head flist; }; -struct basic_filter { +struct basic_filter +{ u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; @@ -90,7 +92,8 @@ static int basic_init(struct tcf_proto *tp) return 0; } -static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f) +static inline void basic_delete_filter(struct tcf_proto *tp, + struct basic_filter *f) { tcf_unbind_filter(tp, &f->res); tcf_exts_destroy(tp, &f->exts); @@ -132,9 +135,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = { [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED }, }; -static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f, - unsigned long base, struct nlattr **tb, - struct nlattr *est) +static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f, + unsigned long base, struct nlattr **tb, + struct nlattr *est) { int err = -EINVAL; struct tcf_exts e; @@ -200,7 +203,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, } while (--i > 0 && basic_get(tp, 
head->hgenerator)); if (i <= 0) { - pr_err("Insufficient number of handles\n"); + printk(KERN_ERR "Insufficient number of handles\n"); goto errout; } diff --git a/trunk/net/sched/cls_cgroup.c b/trunk/net/sched/cls_cgroup.c index 32a335194ca5..d49c40fb7e09 100644 --- a/trunk/net/sched/cls_cgroup.c +++ b/trunk/net/sched/cls_cgroup.c @@ -56,8 +56,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, { struct cgroup_cls_state *cs; - cs = kzalloc(sizeof(*cs), GFP_KERNEL); - if (!cs) + if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL))) return ERR_PTR(-ENOMEM); if (cgrp->parent) @@ -95,7 +94,8 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); } -struct cls_cgroup_head { +struct cls_cgroup_head +{ u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; @@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg) { - struct nlattr *tb[TCA_CGROUP_MAX + 1]; + struct nlattr *tb[TCA_CGROUP_MAX+1]; struct cls_cgroup_head *head = tp->root; struct tcf_ematch_tree t; struct tcf_exts e; diff --git a/trunk/net/sched/cls_flow.c b/trunk/net/sched/cls_flow.c index 8ec01391d988..5b271a18bc3a 100644 --- a/trunk/net/sched/cls_flow.c +++ b/trunk/net/sched/cls_flow.c @@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb) if (!pskb_network_may_pull(skb, sizeof(*iph))) break; iph = ip_hdr(skb); - if (iph->frag_off & htons(IP_MF | IP_OFFSET)) + if (iph->frag_off & htons(IP_MF|IP_OFFSET)) break; poff = proto_ports_offset(iph->protocol); if (poff >= 0 && @@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb) if (!pskb_network_may_pull(skb, sizeof(*iph))) break; iph = ip_hdr(skb); - if (iph->frag_off & htons(IP_MF | IP_OFFSET)) + if (iph->frag_off & htons(IP_MF|IP_OFFSET)) break; poff = proto_ports_offset(iph->protocol); if (poff >= 0 && @@ -276,7 +276,7 @@ static u32 flow_get_nfct_proto_dst(struct sk_buff *skb) static u32 flow_get_rtclassid(const struct sk_buff *skb) { -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE if (skb_dst(skb)) return skb_dst(skb)->tclassid; #endif diff --git a/trunk/net/sched/cls_fw.c b/trunk/net/sched/cls_fw.c index 26e7bc4ffb79..93b0a7b6f9b4 100644 --- a/trunk/net/sched/cls_fw.c +++ b/trunk/net/sched/cls_fw.c @@ -31,12 +31,14 @@ #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *)) -struct fw_head { +struct fw_head +{ struct fw_filter *ht[HTSIZE]; u32 mask; }; -struct fw_filter { +struct fw_filter +{ struct fw_filter *next; u32 id; struct tcf_result res; @@ -51,7 +53,7 @@ static const struct tcf_ext_map fw_ext_map = { .police = TCA_FW_POLICE }; -static inline int fw_hash(u32 handle) +static __inline__ int fw_hash(u32 handle) { if (HTSIZE == 4096) return ((handle >> 24) & 0xFFF) ^ @@ -80,14 +82,14 @@ static inline int fw_hash(u32 handle) static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { - struct fw_head *head = (struct fw_head *)tp->root; + struct fw_head *head = (struct fw_head*)tp->root; struct fw_filter *f; int r; u32 id = skb->mark; if (head != NULL) { id &= head->mask; - for (f = head->ht[fw_hash(id)]; f; f = f->next) { + for (f=head->ht[fw_hash(id)]; f; f=f->next) { if (f->id == id) { *res = f->res; #ifdef CONFIG_NET_CLS_IND @@ -103,8 +105,7 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, } } else { /* old method */ - if (id && (TC_H_MAJ(id) == 0 || - !(TC_H_MAJ(id ^ 
tp->q->handle)))) { + if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) { res->classid = id; res->class = 0; return 0; @@ -116,13 +117,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, static unsigned long fw_get(struct tcf_proto *tp, u32 handle) { - struct fw_head *head = (struct fw_head *)tp->root; + struct fw_head *head = (struct fw_head*)tp->root; struct fw_filter *f; if (head == NULL) return 0; - for (f = head->ht[fw_hash(handle)]; f; f = f->next) { + for (f=head->ht[fw_hash(handle)]; f; f=f->next) { if (f->id == handle) return (unsigned long)f; } @@ -138,7 +139,8 @@ static int fw_init(struct tcf_proto *tp) return 0; } -static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f) +static inline void +fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f) { tcf_unbind_filter(tp, &f->res); tcf_exts_destroy(tp, &f->exts); @@ -154,8 +156,8 @@ static void fw_destroy(struct tcf_proto *tp) if (head == NULL) return; - for (h = 0; h < HTSIZE; h++) { - while ((f = head->ht[h]) != NULL) { + for (h=0; h<HTSIZE; h++) { + while ((f=head->ht[h]) != NULL) { head->ht[h] = f->next; fw_delete_filter(tp, f); } @@ -165,14 +167,14 @@ static void fw_destroy(struct tcf_proto *tp) static int fw_delete(struct tcf_proto *tp, unsigned long arg) { - struct fw_head *head = (struct fw_head *)tp->root; - struct fw_filter *f = (struct fw_filter *)arg; + struct fw_head *head = (struct fw_head*)tp->root; + struct fw_filter *f = (struct fw_filter*)arg; struct fw_filter **fp; if (head == NULL || f == NULL) goto out; - for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { + for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { if (*fp == f) { tcf_tree_lock(tp); *fp = f->next; @@ -238,7 +240,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base, struct nlattr **tca, unsigned long *arg) { - struct fw_head *head = (struct fw_head *)tp->root; + struct fw_head *head = (struct fw_head*)tp->root; struct fw_filter *f = (struct fw_filter *) *arg; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FW_MAX + 1]; @@ -300,7 +302,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base, static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct fw_head *head = (struct fw_head *)tp->root; + struct fw_head *head = (struct fw_head*)tp->root; int h; if (head == NULL) @@ -330,7 +332,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct fw_head *head = (struct fw_head *)tp->root; - struct fw_filter *f = (struct fw_filter *)fh; + struct fw_filter *f = (struct fw_filter*)fh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; diff --git a/trunk/net/sched/cls_route.c b/trunk/net/sched/cls_route.c index d580cdfca093..694dcd85dec8 100644 --- a/trunk/net/sched/cls_route.c +++ b/trunk/net/sched/cls_route.c @@ -23,30 +23,34 @@ #include /* - * 1. For now we assume that route tags < 256. - * It allows to use direct table lookups, instead of hash tables. - * 2. For now we assume that "from TAG" and "fromdev DEV" statements - * are mutually exclusive. - * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX" + 1. For now we assume that route tags < 256. + It allows to use direct table lookups, instead of hash tables. + 2. For now we assume that "from TAG" and "fromdev DEV" statements + are mutually exclusive. + 3. 
"to TAG from ANY" has higher priority, than "to ANY from XXX" */ -struct route4_fastmap { +struct route4_fastmap +{ struct route4_filter *filter; u32 id; int iif; }; -struct route4_head { +struct route4_head +{ struct route4_fastmap fastmap[16]; - struct route4_bucket *table[256 + 1]; + struct route4_bucket *table[256+1]; }; -struct route4_bucket { +struct route4_bucket +{ /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ - struct route4_filter *ht[16 + 16 + 1]; + struct route4_filter *ht[16+16+1]; }; -struct route4_filter { +struct route4_filter +{ struct route4_filter *next; u32 id; int iif; @@ -57,20 +61,20 @@ struct route4_filter { struct route4_bucket *bkt; }; -#define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) +#define ROUTE4_FAILURE ((struct route4_filter*)(-1L)) static const struct tcf_ext_map route_ext_map = { .police = TCA_ROUTE4_POLICE, .action = TCA_ROUTE4_ACT }; -static inline int route4_fastmap_hash(u32 id, int iif) +static __inline__ int route4_fastmap_hash(u32 id, int iif) { - return id & 0xF; + return id&0xF; } -static void -route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) +static inline +void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) { spinlock_t *root_lock = qdisc_root_sleeping_lock(q); @@ -79,33 +83,32 @@ route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) spin_unlock_bh(root_lock); } -static void +static inline void route4_set_fastmap(struct route4_head *head, u32 id, int iif, struct route4_filter *f) { int h = route4_fastmap_hash(id, iif); - head->fastmap[h].id = id; head->fastmap[h].iif = iif; head->fastmap[h].filter = f; } -static inline int route4_hash_to(u32 id) +static __inline__ int route4_hash_to(u32 id) { - return id & 0xFF; + return id&0xFF; } -static inline int route4_hash_from(u32 id) +static __inline__ int route4_hash_from(u32 id) { - return (id >> 16) & 0xF; + return (id>>16)&0xF; } -static inline int route4_hash_iif(int iif) +static __inline__ int route4_hash_iif(int iif) { - return 16 + ((iif >> 16) & 0xF); + return 16 + ((iif>>16)&0xF); } -static inline int route4_hash_wild(void) +static __inline__ int route4_hash_wild(void) { return 32; } @@ -128,22 +131,21 @@ static inline int route4_hash_wild(void) static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { - struct route4_head *head = (struct route4_head *)tp->root; + struct route4_head *head = (struct route4_head*)tp->root; struct dst_entry *dst; struct route4_bucket *b; struct route4_filter *f; u32 id, h; int iif, dont_cache = 0; - dst = skb_dst(skb); - if (!dst) + if ((dst = skb_dst(skb)) == NULL) goto failure; id = dst->tclassid; if (head == NULL) goto old_method; - iif = ((struct rtable *)dst)->fl.iif; + iif = ((struct rtable*)dst)->fl.iif; h = route4_fastmap_hash(id, iif); if (id == head->fastmap[h].id && @@ -159,8 +161,7 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, h = route4_hash_to(id); restart: - b = head->table[h]; - if (b) { + if ((b = head->table[h]) != NULL) { for (f = b->ht[route4_hash_from(id)]; f; f = f->next) if (f->id == id) ROUTE4_APPLY_RESULT(); @@ -196,9 +197,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, static inline u32 to_hash(u32 id) { - u32 h = id & 0xFF; - - if (id & 0x8000) + u32 h = id&0xFF; + if (id&0x8000) h += 256; return h; } @@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id) if (!(id & 0x8000)) { if (id > 255) return 256; - return id & 0xF; + return id&0xF; } - return 16 + (id & 
0xF); + return 16 + (id&0xF); } static unsigned long route4_get(struct tcf_proto *tp, u32 handle) { - struct route4_head *head = (struct route4_head *)tp->root; + struct route4_head *head = (struct route4_head*)tp->root; struct route4_bucket *b; struct route4_filter *f; - unsigned int h1, h2; + unsigned h1, h2; if (!head) return 0; @@ -230,12 +230,11 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) if (h1 > 256) return 0; - h2 = from_hash(handle >> 16); + h2 = from_hash(handle>>16); if (h2 > 32) return 0; - b = head->table[h1]; - if (b) { + if ((b = head->table[h1]) != NULL) { for (f = b->ht[h2]; f; f = f->next) if (f->handle == handle) return (unsigned long)f; @@ -252,7 +251,7 @@ static int route4_init(struct tcf_proto *tp) return 0; } -static void +static inline void route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) { tcf_unbind_filter(tp, &f->res); @@ -268,12 +267,11 @@ static void route4_destroy(struct tcf_proto *tp) if (head == NULL) return; - for (h1 = 0; h1 <= 256; h1++) { + for (h1=0; h1<=256; h1++) { struct route4_bucket *b; - b = head->table[h1]; - if (b) { - for (h2 = 0; h2 <= 32; h2++) { + if ((b = head->table[h1]) != NULL) { + for (h2=0; h2<=32; h2++) { struct route4_filter *f; while ((f = b->ht[h2]) != NULL) { @@ -289,9 +287,9 @@ static void route4_destroy(struct tcf_proto *tp) static int route4_delete(struct tcf_proto *tp, unsigned long arg) { - struct route4_head *head = (struct route4_head *)tp->root; - struct route4_filter **fp, *f = (struct route4_filter *)arg; - unsigned int h = 0; + struct route4_head *head = (struct route4_head*)tp->root; + struct route4_filter **fp, *f = (struct route4_filter*)arg; + unsigned h = 0; struct route4_bucket *b; int i; @@ -301,7 +299,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) h = f->handle; b = f->bkt; - for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) { + for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) { if (*fp == f) { tcf_tree_lock(tp); *fp = f->next; @@ -312,7 +310,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) /* Strip tree */ - for (i = 0; i <= 32; i++) + for (i=0; i<=32; i++) if (b->ht[i]) return 0; @@ -382,8 +380,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base, } h1 = to_hash(nhandle); - b = head->table[h1]; - if (!b) { + if ((b = head->table[h1]) == NULL) { err = -ENOBUFS; b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); if (b == NULL) @@ -394,7 +391,6 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base, tcf_tree_unlock(tp); } else { unsigned int h2 = from_hash(nhandle >> 16); - err = -EEXIST; for (fp = b->ht[h2]; fp; fp = fp->next) if (fp->handle == f->handle) @@ -448,8 +444,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, if (err < 0) return err; - f = (struct route4_filter *)*arg; - if (f) { + if ((f = (struct route4_filter*)*arg) != NULL) { if (f->handle != handle && handle) return -EINVAL; @@ -486,7 +481,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, reinsert: h = from_hash(f->handle >> 16); - for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next) + for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next) if (f->handle < f1->handle) break; @@ -497,8 +492,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, if (old_handle && f->handle != old_handle) { th = to_hash(old_handle); h = from_hash(old_handle >> 16); - b = head->table[th]; - if (b) { + if ((b = head->table[th]) != NULL) { for 
(fp = &b->ht[h]; *fp; fp = &(*fp)->next) { if (*fp == f) { *fp = f->next; @@ -521,7 +515,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base, static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct route4_head *head = tp->root; - unsigned int h, h1; + unsigned h, h1; if (head == NULL) arg->stop = 1; @@ -555,7 +549,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) static int route4_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct route4_filter *f = (struct route4_filter *)fh; + struct route4_filter *f = (struct route4_filter*)fh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; u32 id; @@ -569,15 +563,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - if (!(f->handle & 0x8000)) { - id = f->id & 0xFF; + if (!(f->handle&0x8000)) { + id = f->id&0xFF; NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); } - if (f->handle & 0x80000000) { - if ((f->handle >> 16) != 0xFFFF) + if (f->handle&0x80000000) { + if ((f->handle>>16) != 0xFFFF) NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); } else { - id = f->id >> 16; + id = f->id>>16; NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); } if (f->res.classid) diff --git a/trunk/net/sched/cls_rsvp.h b/trunk/net/sched/cls_rsvp.h index 402c44b241a3..425a1790b048 100644 --- a/trunk/net/sched/cls_rsvp.h +++ b/trunk/net/sched/cls_rsvp.h @@ -66,25 +66,28 @@ powerful classification engine. */ -struct rsvp_head { +struct rsvp_head +{ u32 tmap[256/32]; u32 hgenerator; u8 tgenerator; struct rsvp_session *ht[256]; }; -struct rsvp_session { +struct rsvp_session +{ struct rsvp_session *next; __be32 dst[RSVP_DST_LEN]; struct tc_rsvp_gpi dpi; u8 protocol; u8 tunnelid; /* 16 (src,sport) hash slots, and one wildcard source slot */ - struct rsvp_filter *ht[16 + 1]; + struct rsvp_filter *ht[16+1]; }; -struct rsvp_filter { +struct rsvp_filter +{ struct rsvp_filter *next; __be32 src[RSVP_DST_LEN]; struct tc_rsvp_gpi spi; @@ -97,19 +100,17 @@ struct rsvp_filter { struct rsvp_session *sess; }; -static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) +static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) { - unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1]; - + unsigned h = (__force __u32)dst[RSVP_DST_LEN-1]; h ^= h>>16; h ^= h>>8; return (h ^ protocol ^ tunnelid) & 0xFF; } -static inline unsigned int hash_src(__be32 *src) +static __inline__ unsigned hash_src(__be32 *src) { - unsigned int h = (__force __u32)src[RSVP_DST_LEN-1]; - + unsigned h = (__force __u32)src[RSVP_DST_LEN-1]; h ^= h>>16; h ^= h>>8; h ^= h>>4; @@ -133,10 +134,10 @@ static struct tcf_ext_map rsvp_ext_map = { static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { - struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; + struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht; struct rsvp_session *s; struct rsvp_filter *f; - unsigned int h1, h2; + unsigned h1, h2; __be32 *dst, *src; u8 protocol; u8 tunnelid = 0; @@ -161,13 +162,13 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, src = &nhptr->saddr.s6_addr32[0]; dst = &nhptr->daddr.s6_addr32[0]; protocol = nhptr->nexthdr; - xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr); + xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr); #else src = &nhptr->saddr; dst = &nhptr->daddr; protocol = nhptr->protocol; - xprt = ((u8 *)nhptr) + (nhptr->ihl<<2); - if (nhptr->frag_off & htons(IP_MF | IP_OFFSET)) + xprt = 
((u8*)nhptr) + (nhptr->ihl<<2); + if (nhptr->frag_off & htons(IP_MF|IP_OFFSET)) return -1; #endif @@ -175,10 +176,10 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, h2 = hash_src(src); for (s = sht[h1]; s; s = s->next) { - if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] && + if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && protocol == s->protocol && !(s->dpi.mask & - (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) && + (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) && #if RSVP_DST_LEN == 4 dst[0] == s->dst[0] && dst[1] == s->dst[1] && @@ -187,8 +188,8 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, tunnelid == s->tunnelid) { for (f = s->ht[h2]; f; f = f->next) { - if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] && - !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key)) + if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] && + !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key)) #if RSVP_DST_LEN == 4 && src[0] == f->src[0] && @@ -204,7 +205,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, return 0; tunnelid = f->res.classid; - nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr)); + nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr)); goto restart; } } @@ -223,11 +224,11 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) { - struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; + struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht; struct rsvp_session *s; struct rsvp_filter *f; - unsigned int h1 = handle & 0xFF; - unsigned int h2 = (handle >> 8) & 0xFF; + unsigned h1 = handle&0xFF; + unsigned h2 = (handle>>8)&0xFF; if (h2 > 16) return 0; @@ -257,7 +258,7 @@ static int rsvp_init(struct tcf_proto *tp) return -ENOBUFS; } -static void +static inline void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) { tcf_unbind_filter(tp, &f->res); @@ -276,13 +277,13 @@ static void rsvp_destroy(struct tcf_proto *tp) sht = data->ht; - for (h1 = 0; h1 < 256; h1++) { + for (h1=0; h1<256; h1++) { struct rsvp_session *s; while ((s = sht[h1]) != NULL) { sht[h1] = s->next; - for (h2 = 0; h2 <= 16; h2++) { + for (h2=0; h2<=16; h2++) { struct rsvp_filter *f; while ((f = s->ht[h2]) != NULL) { @@ -298,13 +299,13 @@ static void rsvp_destroy(struct tcf_proto *tp) static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) { - struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg; - unsigned int h = f->handle; + struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg; + unsigned h = f->handle; struct rsvp_session **sp; struct rsvp_session *s = f->sess; int i; - for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) { + for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) { if (*fp == f) { tcf_tree_lock(tp); *fp = f->next; @@ -313,12 +314,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) /* Strip tree */ - for (i = 0; i <= 16; i++) + for (i=0; i<=16; i++) if (s->ht[i]) return 0; /* OK, session has no flows */ - for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF]; + for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF]; *sp; sp = &(*sp)->next) { if (*sp == s) { tcf_tree_lock(tp); @@ -336,14 +337,13 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) return 0; } -static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt) +static unsigned gen_handle(struct tcf_proto *tp, unsigned salt) { struct rsvp_head *data = tp->root; int i = 0xFFFF; while (i-- > 0) { u32 
h; - if ((data->hgenerator += 0x10000) == 0) data->hgenerator = 0x10000; h = data->hgenerator|salt; @@ -355,10 +355,10 @@ static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt) static int tunnel_bts(struct rsvp_head *data) { - int n = data->tgenerator >> 5; - u32 b = 1 << (data->tgenerator & 0x1F); + int n = data->tgenerator>>5; + u32 b = 1<<(data->tgenerator&0x1F); - if (data->tmap[n] & b) + if (data->tmap[n]&b) return 0; data->tmap[n] |= b; return 1; @@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data) memset(tmap, 0, sizeof(tmap)); - for (h1 = 0; h1 < 256; h1++) { + for (h1=0; h1<256; h1++) { struct rsvp_session *s; for (s = sht[h1]; s; s = s->next) { - for (h2 = 0; h2 <= 16; h2++) { + for (h2=0; h2<=16; h2++) { struct rsvp_filter *f; for (f = s->ht[h2]; f; f = f->next) { @@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data) { int i, k; - for (k = 0; k < 2; k++) { - for (i = 255; i > 0; i--) { + for (k=0; k<2; k++) { + for (i=255; i>0; i--) { if (++data->tgenerator == 0) data->tgenerator = 1; if (tunnel_bts(data)) @@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, struct nlattr *opt = tca[TCA_OPTIONS-1]; struct nlattr *tb[TCA_RSVP_MAX + 1]; struct tcf_exts e; - unsigned int h1, h2; + unsigned h1, h2; __be32 *dst; int err; @@ -443,8 +443,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, if (err < 0) return err; - f = (struct rsvp_filter *)*arg; - if (f) { + if ((f = (struct rsvp_filter*)*arg) != NULL) { /* Node exists: adjust only classid */ if (f->handle != handle && handle) @@ -501,7 +500,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, goto errout; } - for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) { + for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) { if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && pinfo && pinfo->protocol == s->protocol && memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 && @@ -524,7 +523,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, tcf_exts_change(tp, &f->exts, &e); for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next) - if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask) + if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask) break; f->next = *fp; wmb(); @@ -568,7 +567,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct rsvp_head *head = tp->root; - unsigned int h, h1; + unsigned h, h1; if (arg->stop) return; @@ -599,7 +598,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct rsvp_filter *f = (struct rsvp_filter *)fh; + struct rsvp_filter *f = (struct rsvp_filter*)fh; struct rsvp_session *s; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; @@ -625,7 +624,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); if (f->res.classid) NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); - if (((f->handle >> 8) & 0xFF) != 16) + if (((f->handle>>8)&0xFF) != 16) NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) diff --git a/trunk/net/sched/cls_tcindex.c b/trunk/net/sched/cls_tcindex.c index 36667fa64237..20ef330bb918 100644 --- a/trunk/net/sched/cls_tcindex.c +++ b/trunk/net/sched/cls_tcindex.c @@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long 
base, u32 handle, * of the hashing index is below the threshold. */ if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD) - cp.hash = (cp.mask >> cp.shift) + 1; + cp.hash = (cp.mask >> cp.shift)+1; else cp.hash = DEFAULT_HASH_SIZE; } diff --git a/trunk/net/sched/cls_u32.c b/trunk/net/sched/cls_u32.c index 966920c14e7a..b0c2a82178af 100644 --- a/trunk/net/sched/cls_u32.c +++ b/trunk/net/sched/cls_u32.c @@ -42,7 +42,8 @@ #include #include -struct tc_u_knode { +struct tc_u_knode +{ struct tc_u_knode *next; u32 handle; struct tc_u_hnode *ht_up; @@ -62,17 +63,19 @@ struct tc_u_knode { struct tc_u32_sel sel; }; -struct tc_u_hnode { +struct tc_u_hnode +{ struct tc_u_hnode *next; u32 handle; u32 prio; struct tc_u_common *tp_c; int refcnt; - unsigned int divisor; + unsigned divisor; struct tc_u_knode *ht[1]; }; -struct tc_u_common { +struct tc_u_common +{ struct tc_u_hnode *hlist; struct Qdisc *q; int refcnt; @@ -84,11 +87,9 @@ static const struct tcf_ext_map u32_ext_map = { .police = TCA_U32_POLICE }; -static inline unsigned int u32_hash_fold(__be32 key, - const struct tc_u32_sel *sel, - u8 fshift) +static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) { - unsigned int h = ntohl(key & sel->hmask) >> fshift; + unsigned h = ntohl(key & sel->hmask)>>fshift; return h; } @@ -100,7 +101,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re unsigned int off; } stack[TC_U32_MAXDEPTH]; - struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root; + struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; unsigned int off = skb_network_offset(skb); struct tc_u_knode *n; int sdepth = 0; @@ -119,7 +120,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re struct tc_u32_key *key = n->sel.keys; #ifdef CONFIG_CLS_U32_PERF - n->pf->rcnt += 1; + n->pf->rcnt +=1; j = 0; #endif @@ -132,7 +133,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re } #endif - for (i = n->sel.nkeys; i > 0; i--, key++) { + for (i = n->sel.nkeys; i>0; i--, key++) { int toff = off + key->off + (off2 & key->offmask); __be32 *data, _data; @@ -147,13 +148,13 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re goto next_knode; } #ifdef CONFIG_CLS_U32_PERF - n->pf->kcnts[j] += 1; + n->pf->kcnts[j] +=1; j++; #endif } if (n->ht_down == NULL) { check_terminal: - if (n->sel.flags & TC_U32_TERMINAL) { + if (n->sel.flags&TC_U32_TERMINAL) { *res = n->res; #ifdef CONFIG_NET_CLS_IND @@ -163,7 +164,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re } #endif #ifdef CONFIG_CLS_U32_PERF - n->pf->rhit += 1; + n->pf->rhit +=1; #endif r = tcf_exts_exec(skb, &n->exts, res); if (r < 0) { @@ -196,10 +197,10 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re sel = ht->divisor & u32_hash_fold(*data, &n->sel, n->fshift); } - if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT))) + if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) goto next_ht; - if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) { + if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { off2 = n->sel.off + 3; if (n->sel.flags & TC_U32_VAROFFSET) { __be16 *data, _data; @@ -214,7 +215,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re } off2 &= ~3; } - if (n->sel.flags & TC_U32_EAT) { + if (n->sel.flags&TC_U32_EAT) { off += off2; off2 = 0; } @@ -235,11 +236,11 @@ static int u32_classify(struct sk_buff *skb, struct 
tcf_proto *tp, struct tcf_re deadloop: if (net_ratelimit()) - pr_warning("cls_u32: dead loop\n"); + printk(KERN_WARNING "cls_u32: dead loop\n"); return -1; } -static struct tc_u_hnode * +static __inline__ struct tc_u_hnode * u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) { struct tc_u_hnode *ht; @@ -251,10 +252,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) return ht; } -static struct tc_u_knode * +static __inline__ struct tc_u_knode * u32_lookup_key(struct tc_u_hnode *ht, u32 handle) { - unsigned int sel; + unsigned sel; struct tc_u_knode *n = NULL; sel = TC_U32_HASH(handle); @@ -299,7 +300,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c) do { if (++tp_c->hgenerator == 0x7FF) tp_c->hgenerator = 1; - } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); + } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0; } @@ -377,9 +378,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key) static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) { struct tc_u_knode *n; - unsigned int h; + unsigned h; - for (h = 0; h <= ht->divisor; h++) { + for (h=0; h<=ht->divisor; h++) { while ((n = ht->ht[h]) != NULL) { ht->ht[h] = n->next; @@ -445,13 +446,13 @@ static void u32_destroy(struct tcf_proto *tp) static int u32_delete(struct tcf_proto *tp, unsigned long arg) { - struct tc_u_hnode *ht = (struct tc_u_hnode *)arg; + struct tc_u_hnode *ht = (struct tc_u_hnode*)arg; if (ht == NULL) return 0; if (TC_U32_KEY(ht->handle)) - return u32_delete_key(tp, (struct tc_u_knode *)ht); + return u32_delete_key(tp, (struct tc_u_knode*)ht); if (tp->root == ht) return -EINVAL; @@ -469,14 +470,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg) static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) { struct tc_u_knode *n; - unsigned int i = 0x7FF; + unsigned i = 0x7FF; - for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) + for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next) if (i < TC_U32_NODE(n->handle)) i = TC_U32_NODE(n->handle); i++; - return handle | (i > 0xFFF ? 0xFFF : i); + return handle|(i>0xFFF ? 
0xFFF : i); } static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { @@ -565,8 +566,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, if (err < 0) return err; - n = (struct tc_u_knode *)*arg; - if (n) { + if ((n = (struct tc_u_knode*)*arg) != NULL) { if (TC_U32_KEY(n->handle) == 0) return -EINVAL; @@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, } if (tb[TCA_U32_DIVISOR]) { - unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); + unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); if (--divisor > 0x100) return -EINVAL; @@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, if (handle == 0) return -ENOMEM; } - ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL); + ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); if (ht == NULL) return -ENOBUFS; ht->tp_c = tp_c; @@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) struct tc_u_common *tp_c = tp->data; struct tc_u_hnode *ht; struct tc_u_knode *n; - unsigned int h; + unsigned h; if (arg->stop) return; @@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) static int u32_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct tc_u_knode *n = (struct tc_u_knode *)fh; + struct tc_u_knode *n = (struct tc_u_knode*)fh; struct nlattr *nest; if (n == NULL) @@ -730,9 +730,8 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, goto nla_put_failure; if (TC_U32_KEY(n->handle) == 0) { - struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; - u32 divisor = ht->divisor + 1; - + struct tc_u_hnode *ht = (struct tc_u_hnode*)fh; + u32 divisor = ht->divisor+1; NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); } else { NLA_PUT(skb, TCA_U32_SEL, @@ -756,7 +755,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, goto nla_put_failure; #ifdef CONFIG_NET_CLS_IND - if (strlen(n->indev)) + if(strlen(n->indev)) NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); #endif #ifdef CONFIG_CLS_U32_PERF diff --git a/trunk/net/sched/em_cmp.c b/trunk/net/sched/em_cmp.c index 1c8360a2752a..bc450397487a 100644 --- a/trunk/net/sched/em_cmp.c +++ b/trunk/net/sched/em_cmp.c @@ -33,41 +33,40 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em, return 0; switch (cmp->align) { - case TCF_EM_ALIGN_U8: - val = *ptr; - break; + case TCF_EM_ALIGN_U8: + val = *ptr; + break; - case TCF_EM_ALIGN_U16: - val = get_unaligned_be16(ptr); + case TCF_EM_ALIGN_U16: + val = get_unaligned_be16(ptr); - if (cmp_needs_transformation(cmp)) - val = be16_to_cpu(val); - break; + if (cmp_needs_transformation(cmp)) + val = be16_to_cpu(val); + break; - case TCF_EM_ALIGN_U32: - /* Worth checking boundries? The branching seems - * to get worse. Visit again. - */ - val = get_unaligned_be32(ptr); + case TCF_EM_ALIGN_U32: + /* Worth checking boundries? The branching seems + * to get worse. Visit again. 
*/ + val = get_unaligned_be32(ptr); - if (cmp_needs_transformation(cmp)) - val = be32_to_cpu(val); - break; + if (cmp_needs_transformation(cmp)) + val = be32_to_cpu(val); + break; - default: - return 0; + default: + return 0; } if (cmp->mask) val &= cmp->mask; switch (cmp->opnd) { - case TCF_EM_OPND_EQ: - return val == cmp->val; - case TCF_EM_OPND_LT: - return val < cmp->val; - case TCF_EM_OPND_GT: - return val > cmp->val; + case TCF_EM_OPND_EQ: + return val == cmp->val; + case TCF_EM_OPND_LT: + return val < cmp->val; + case TCF_EM_OPND_GT: + return val > cmp->val; } return 0; diff --git a/trunk/net/sched/em_meta.c b/trunk/net/sched/em_meta.c index a889d099320f..34da5e29ea1a 100644 --- a/trunk/net/sched/em_meta.c +++ b/trunk/net/sched/em_meta.c @@ -73,18 +73,21 @@ #include #include -struct meta_obj { +struct meta_obj +{ unsigned long value; unsigned int len; }; -struct meta_value { +struct meta_value +{ struct tcf_meta_val hdr; unsigned long val; unsigned int len; }; -struct meta_match { +struct meta_match +{ struct meta_value lvalue; struct meta_value rvalue; }; @@ -252,7 +255,7 @@ META_COLLECTOR(int_rtclassid) if (unlikely(skb_dst(skb) == NULL)) *err = -1; else -#ifdef CONFIG_IP_ROUTE_CLASSID +#ifdef CONFIG_NET_CLS_ROUTE dst->value = skb_dst(skb)->tclassid; #else dst->value = 0; @@ -480,7 +483,8 @@ META_COLLECTOR(int_sk_write_pend) * Meta value collectors assignment table **************************************************************************/ -struct meta_ops { +struct meta_ops +{ void (*get)(struct sk_buff *, struct tcf_pkt_info *, struct meta_value *, struct meta_obj *, int *); }; @@ -490,7 +494,7 @@ struct meta_ops { /* Meta value operations table listing all meta value collectors and * assigns them to a type and meta id. */ -static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = { +static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { [TCF_META_TYPE_VAR] = { [META_ID(DEV)] = META_FUNC(var_dev), [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if), @@ -546,7 +550,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = } }; -static inline struct meta_ops *meta_ops(struct meta_value *val) +static inline struct meta_ops * meta_ops(struct meta_value *val) { return &__meta_ops[meta_type(val)][meta_id(val)]; } @@ -645,8 +649,9 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) { if (v->len == sizeof(unsigned long)) NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); - else if (v->len == sizeof(u32)) + else if (v->len == sizeof(u32)) { NLA_PUT_U32(skb, tlv, v->val); + } return 0; @@ -658,7 +663,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) * Type specific operations table **************************************************************************/ -struct meta_type_ops { +struct meta_type_ops +{ void (*destroy)(struct meta_value *); int (*compare)(struct meta_obj *, struct meta_obj *); int (*change)(struct meta_value *, struct nlattr *); @@ -666,7 +672,7 @@ struct meta_type_ops { int (*dump)(struct sk_buff *, struct meta_value *, int); }; -static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = { +static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = { [TCF_META_TYPE_VAR] = { .destroy = meta_var_destroy, .compare = meta_var_compare, @@ -682,7 +688,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = { } }; -static inline struct meta_type_ops *meta_type_ops(struct meta_value *v) +static inline struct 
meta_type_ops * meta_type_ops(struct meta_value *v) { return &__meta_type_ops[meta_type(v)]; } @@ -707,7 +713,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info, return err; if (meta_type_ops(v)->apply_extras) - meta_type_ops(v)->apply_extras(v, dst); + meta_type_ops(v)->apply_extras(v, dst); return 0; } @@ -726,12 +732,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m, r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value); switch (meta->lvalue.hdr.op) { - case TCF_EM_OPND_EQ: - return !r; - case TCF_EM_OPND_LT: - return r < 0; - case TCF_EM_OPND_GT: - return r > 0; + case TCF_EM_OPND_EQ: + return !r; + case TCF_EM_OPND_LT: + return r < 0; + case TCF_EM_OPND_GT: + return r > 0; } return 0; @@ -765,7 +771,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla) static inline int meta_is_supported(struct meta_value *val) { - return !meta_id(val) || meta_ops(val)->get; + return (!meta_id(val) || meta_ops(val)->get); } static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = { diff --git a/trunk/net/sched/em_nbyte.c b/trunk/net/sched/em_nbyte.c index a3bed07a008b..1a4176aee6e5 100644 --- a/trunk/net/sched/em_nbyte.c +++ b/trunk/net/sched/em_nbyte.c @@ -18,7 +18,8 @@ #include #include -struct nbyte_data { +struct nbyte_data +{ struct tcf_em_nbyte hdr; char pattern[0]; }; diff --git a/trunk/net/sched/em_text.c b/trunk/net/sched/em_text.c index 15d353d2e4be..ea8f566e720c 100644 --- a/trunk/net/sched/em_text.c +++ b/trunk/net/sched/em_text.c @@ -19,7 +19,8 @@ #include #include -struct text_match { +struct text_match +{ u16 from_offset; u16 to_offset; u8 from_layer; diff --git a/trunk/net/sched/em_u32.c b/trunk/net/sched/em_u32.c index 797bdb88c010..953f1479f7da 100644 --- a/trunk/net/sched/em_u32.c +++ b/trunk/net/sched/em_u32.c @@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em, if (!tcf_valid_offset(skb, ptr, sizeof(u32))) return 0; - return !(((*(__be32 *) ptr) ^ key->val) & key->mask); + return !(((*(__be32*) ptr) ^ key->val) & key->mask); } static struct tcf_ematch_ops em_u32_ops = { diff --git a/trunk/net/sched/ematch.c b/trunk/net/sched/ematch.c index 88d93eb92507..5e37da961f80 100644 --- a/trunk/net/sched/ematch.c +++ b/trunk/net/sched/ematch.c @@ -93,7 +93,7 @@ static LIST_HEAD(ematch_ops); static DEFINE_RWLOCK(ematch_mod_lock); -static struct tcf_ematch_ops *tcf_em_lookup(u16 kind) +static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind) { struct tcf_ematch_ops *e = NULL; @@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops) } EXPORT_SYMBOL(tcf_em_unregister); -static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree, - int index) +static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree, + int index) { return &tree->matches[index]; } @@ -184,8 +184,7 @@ static int tcf_em_validate(struct tcf_proto *tp, if (em_hdr->kind == TCF_EM_CONTAINER) { /* Special ematch called "container", carries an index - * referencing an external ematch sequence. - */ + * referencing an external ematch sequence. */ u32 ref; if (data_len < sizeof(ref)) @@ -196,8 +195,7 @@ static int tcf_em_validate(struct tcf_proto *tp, goto errout; /* We do not allow backward jumps to avoid loops and jumps - * to our own position are of course illegal. - */ + * to our own position are of course illegal. 
*/ if (ref <= idx) goto errout; @@ -210,8 +208,7 @@ static int tcf_em_validate(struct tcf_proto *tp, * which automatically releases the reference again, therefore * the module MUST not be given back under any circumstances * here. Be aware, the destroy function assumes that the - * module is held if the ops field is non zero. - */ + * module is held if the ops field is non zero. */ em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops == NULL) { @@ -224,8 +221,7 @@ static int tcf_em_validate(struct tcf_proto *tp, if (em->ops) { /* We dropped the RTNL mutex in order to * perform the module load. Tell the caller - * to replay the request. - */ + * to replay the request. */ module_put(em->ops->owner); err = -EAGAIN; } @@ -234,8 +230,7 @@ static int tcf_em_validate(struct tcf_proto *tp, } /* ematch module provides expected length of data, so we - * can do a basic sanity check. - */ + * can do a basic sanity check. */ if (em->ops->datalen && data_len < em->ops->datalen) goto errout; @@ -251,8 +246,7 @@ static int tcf_em_validate(struct tcf_proto *tp, * TCF_EM_SIMPLE may be specified stating that the * data only consists of a u32 integer and the module * does not expected a memory reference but rather - * the value carried. - */ + * the value carried. */ if (em_hdr->flags & TCF_EM_SIMPLE) { if (data_len < sizeof(u32)) goto errout; @@ -340,8 +334,7 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, * The array of rt attributes is parsed in the order as they are * provided, their type must be incremental from 1 to n. Even * if it does not serve any real purpose, a failure of sticking - * to this policy will result in parsing failure. - */ + * to this policy will result in parsing failure. */ for (idx = 0; nla_ok(rt_match, list_len); idx++) { err = -EINVAL; @@ -366,8 +359,7 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, /* Check if the number of matches provided by userspace actually * complies with the array of matches. The number was used for * the validation of references and a mismatch could lead to - * undefined references during the matching process. - */ + * undefined references during the matching process. */ if (idx != tree_hdr->nmatches) { err = -EINVAL; goto errout_abort; @@ -457,7 +449,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) .flags = em->flags }; - NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); + NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr); if (em->ops && em->ops->dump) { if (em->ops->dump(skb, em) < 0) @@ -486,7 +478,6 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em, struct tcf_pkt_info *info) { int r = em->ops->match(skb, em, info); - return tcf_em_is_inverted(em) ? 
!r : r; } @@ -536,8 +527,8 @@ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree, stack_overflow: if (net_ratelimit()) - pr_warning("tc ematch: local stack overflow," - " increase NET_EMATCH_STACK\n"); + printk(KERN_WARNING "tc ematch: local stack overflow," + " increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match); diff --git a/trunk/net/sched/sch_api.c b/trunk/net/sched/sch_api.c index 150741579408..b22ca2d1cebc 100644 --- a/trunk/net/sched/sch_api.c +++ b/trunk/net/sched/sch_api.c @@ -187,7 +187,7 @@ int unregister_qdisc(struct Qdisc_ops *qops) int err = -ENOENT; write_lock(&qdisc_mod_lock); - for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) + for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) if (q == qops) break; if (q) { @@ -321,9 +321,7 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab) if (!tab || --tab->refcnt) return; - for (rtabp = &qdisc_rtab_list; - (rtab = *rtabp) != NULL; - rtabp = &rtab->next) { + for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) { if (rtab == tab) { *rtabp = rtab->next; kfree(rtab); @@ -398,11 +396,6 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) return stab; } -static void stab_kfree_rcu(struct rcu_head *head) -{ - kfree(container_of(head, struct qdisc_size_table, rcu)); -} - void qdisc_put_stab(struct qdisc_size_table *tab) { if (!tab) @@ -412,7 +405,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab) if (--tab->refcnt == 0) { list_del(&tab->list); - call_rcu_bh(&tab->rcu, stab_kfree_rcu); + kfree(tab); } spin_unlock(&qdisc_stab_lock); @@ -435,7 +428,7 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab) return -1; } -void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab) +void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab) { int pkt_len, slot; @@ -461,13 +454,14 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_tabl pkt_len = 1; qdisc_skb_cb(skb)->pkt_len = pkt_len; } -EXPORT_SYMBOL(__qdisc_calculate_pkt_len); +EXPORT_SYMBOL(qdisc_calculate_pkt_len); void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) { if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { - pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", - txt, qdisc->ops->id, qdisc->handle >> 16); + printk(KERN_WARNING + "%s: %s qdisc %X: is non-work-conserving?\n", + txt, qdisc->ops->id, qdisc->handle >> 16); qdisc->flags |= TCQ_F_WARN_NONWC; } } @@ -478,7 +472,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, timer); - qdisc_unthrottled(wd->qdisc); + wd->qdisc->flags &= ~TCQ_F_THROTTLED; __netif_schedule(qdisc_root(wd->qdisc)); return HRTIMER_NORESTART; @@ -500,7 +494,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) &qdisc_root_sleeping(wd->qdisc)->state)) return; - qdisc_throttled(wd->qdisc); + wd->qdisc->flags |= TCQ_F_THROTTLED; time = ktime_set(0, 0); time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); @@ -510,7 +504,7 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule); void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) { hrtimer_cancel(&wd->timer); - qdisc_unthrottled(wd->qdisc); + wd->qdisc->flags &= ~TCQ_F_THROTTLED; } EXPORT_SYMBOL(qdisc_watchdog_cancel); @@ -631,7 +625,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev) autohandle = TC_H_MAKE(0x80000000U, 0); } while (qdisc_lookup(dev, 
autohandle) && --i > 0); - return i > 0 ? autohandle : 0; + return i>0 ? autohandle : 0; } void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) @@ -840,7 +834,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, err = PTR_ERR(stab); goto err_out4; } - rcu_assign_pointer(sch->stab, stab); + sch->stab = stab; } if (tca[TCA_RATE]) { spinlock_t *root_lock; @@ -880,7 +874,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, * Any broken qdiscs that would require a ops->reset() here? * The qdisc was never in action so it shouldn't be necessary. */ - qdisc_put_stab(rtnl_dereference(sch->stab)); + qdisc_put_stab(sch->stab); if (ops->destroy) ops->destroy(sch); goto err_out3; @@ -888,7 +882,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) { - struct qdisc_size_table *ostab, *stab = NULL; + struct qdisc_size_table *stab = NULL; int err = 0; if (tca[TCA_OPTIONS]) { @@ -905,9 +899,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) return PTR_ERR(stab); } - ostab = rtnl_dereference(sch->stab); - rcu_assign_pointer(sch->stab, stab); - qdisc_put_stab(ostab); + qdisc_put_stab(sch->stab); + sch->stab = stab; if (tca[TCA_RATE]) { /* NB: ignores errors from replace_estimator @@ -922,8 +915,9 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) return 0; } -struct check_loop_arg { - struct qdisc_walker w; +struct check_loop_arg +{ + struct qdisc_walker w; struct Qdisc *p; int depth; }; @@ -976,8 +970,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) struct Qdisc *p = NULL; int err; - dev = __dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) + if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) return -ENODEV; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); @@ -987,12 +980,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) if (clid) { if (clid != TC_H_ROOT) { if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { - p = qdisc_lookup(dev, TC_H_MAJ(clid)); - if (!p) + if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) return -ENOENT; q = qdisc_leaf(p, clid); - } else if (dev_ingress_queue(dev)) { - q = dev_ingress_queue(dev)->qdisc_sleeping; + } else { /* ingress */ + if (dev_ingress_queue(dev)) + q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { q = dev->qdisc; @@ -1003,8 +996,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) if (tcm->tcm_handle && q->handle != tcm->tcm_handle) return -EINVAL; } else { - q = qdisc_lookup(dev, tcm->tcm_handle); - if (!q) + if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) return -ENOENT; } @@ -1016,8 +1008,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) return -EINVAL; if (q->handle == 0) return -ENOENT; - err = qdisc_graft(dev, p, skb, n, clid, NULL, q); - if (err != 0) + if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) return err; } else { qdisc_notify(net, skb, n, clid, NULL, q); @@ -1026,7 +1017,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) } /* - * Create/change qdisc. + Create/change qdisc. 
*/ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) @@ -1045,8 +1036,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) clid = tcm->tcm_parent; q = p = NULL; - dev = __dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) + if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) return -ENODEV; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); @@ -1056,12 +1046,12 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) if (clid) { if (clid != TC_H_ROOT) { if (clid != TC_H_INGRESS) { - p = qdisc_lookup(dev, TC_H_MAJ(clid)); - if (!p) + if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) return -ENOENT; q = qdisc_leaf(p, clid); - } else if (dev_ingress_queue_create(dev)) { - q = dev_ingress_queue(dev)->qdisc_sleeping; + } else { /* ingress */ + if (dev_ingress_queue_create(dev)) + q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { q = dev->qdisc; @@ -1073,14 +1063,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { if (tcm->tcm_handle) { - if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) + if (q && !(n->nlmsg_flags&NLM_F_REPLACE)) return -EEXIST; if (TC_H_MIN(tcm->tcm_handle)) return -EINVAL; - q = qdisc_lookup(dev, tcm->tcm_handle); - if (!q) + if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) goto create_n_graft; - if (n->nlmsg_flags & NLM_F_EXCL) + if (n->nlmsg_flags&NLM_F_EXCL) return -EEXIST; if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; @@ -1090,7 +1079,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) atomic_inc(&q->refcnt); goto graft; } else { - if (!q) + if (q == NULL) goto create_n_graft; /* This magic test requires explanation. @@ -1112,9 +1101,9 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) * For now we select create/graft, if * user gave KIND, which does not match existing. 
*/ - if ((n->nlmsg_flags & NLM_F_CREATE) && - (n->nlmsg_flags & NLM_F_REPLACE) && - ((n->nlmsg_flags & NLM_F_EXCL) || + if ((n->nlmsg_flags&NLM_F_CREATE) && + (n->nlmsg_flags&NLM_F_REPLACE) && + ((n->nlmsg_flags&NLM_F_EXCL) || (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)))) goto create_n_graft; @@ -1129,7 +1118,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) /* Change qdisc parameters */ if (q == NULL) return -ENOENT; - if (n->nlmsg_flags & NLM_F_EXCL) + if (n->nlmsg_flags&NLM_F_EXCL) return -EEXIST; if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL; @@ -1139,7 +1128,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) return err; create_n_graft: - if (!(n->nlmsg_flags & NLM_F_CREATE)) + if (!(n->nlmsg_flags&NLM_F_CREATE)) return -ENOENT; if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) @@ -1186,7 +1175,6 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; - struct qdisc_size_table *stab; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); tcm = NLMSG_DATA(nlh); @@ -1202,8 +1190,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, goto nla_put_failure; q->qstats.qlen = q->q.qlen; - stab = rtnl_dereference(q->stab); - if (stab && qdisc_dump_stab(skb, stab) < 0) + if (q->stab && qdisc_dump_stab(skb, q->stab) < 0) goto nla_put_failure; if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, @@ -1247,19 +1234,16 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, return -ENOBUFS; if (old && !tc_qdisc_dump_ignore(old)) { - if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, - 0, RTM_DELQDISC) < 0) + if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) goto err_out; } if (new && !tc_qdisc_dump_ignore(new)) { - if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, - old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) + if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) goto err_out; } if (skb->len) - return rtnetlink_send(skb, net, pid, RTNLGRP_TC, - n->nlmsg_flags & NLM_F_ECHO); + return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); err_out: kfree_skb(skb); @@ -1291,7 +1275,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, q_idx++; continue; } - if (!tc_qdisc_dump_ignore(q) && + if (!tc_qdisc_dump_ignore(q) && tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) goto done; @@ -1372,8 +1356,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) u32 qid = TC_H_MAJ(clid); int err; - dev = __dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) + if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) return -ENODEV; err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); @@ -1408,9 +1391,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) qid = dev->qdisc->handle; /* Now qid is genuine qdisc handle consistent - * both with parent and child. - * - * TC_H_MAJ(pid) still may be unspecified, complete it now. + both with parent and child. + + TC_H_MAJ(pid) still may be unspecified, complete it now. */ if (pid) pid = TC_H_MAKE(qid, pid); @@ -1420,8 +1403,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) } /* OK. 
Locate qdisc */ - q = qdisc_lookup(dev, qid); - if (!q) + if ((q = qdisc_lookup(dev, qid)) == NULL) return -ENOENT; /* An check that it supports classes */ @@ -1441,14 +1423,13 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) if (cl == 0) { err = -ENOENT; - if (n->nlmsg_type != RTM_NEWTCLASS || - !(n->nlmsg_flags & NLM_F_CREATE)) + if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE)) goto out; } else { switch (n->nlmsg_type) { case RTM_NEWTCLASS: err = -EEXIST; - if (n->nlmsg_flags & NLM_F_EXCL) + if (n->nlmsg_flags&NLM_F_EXCL) goto out; break; case RTM_DELTCLASS: @@ -1540,14 +1521,14 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb, return -EINVAL; } - return rtnetlink_send(skb, net, pid, RTNLGRP_TC, - n->nlmsg_flags & NLM_F_ECHO); + return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); } -struct qdisc_dump_args { - struct qdisc_walker w; - struct sk_buff *skb; - struct netlink_callback *cb; +struct qdisc_dump_args +{ + struct qdisc_walker w; + struct sk_buff *skb; + struct netlink_callback *cb; }; static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg) @@ -1609,7 +1590,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) { - struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); + struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); struct net *net = sock_net(skb->sk); struct netdev_queue *dev_queue; struct net_device *dev; @@ -1617,8 +1598,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) return 0; - dev = dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) + if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) return 0; s_t = cb->args[0]; @@ -1641,22 +1621,19 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) } /* Main classifier routine: scans classifier chain attached - * to this qdisc, (optionally) tests for protocol and asks - * specific classifiers. + to this qdisc, (optionally) tests for protocol and asks + specific classifiers. 
*/ int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { __be16 protocol = skb->protocol; - int err; + int err = 0; for (; tp; tp = tp->next) { - if (tp->protocol != protocol && - tp->protocol != htons(ETH_P_ALL)) - continue; - err = tp->classify(skb, tp, res); - - if (err >= 0) { + if ((tp->protocol == protocol || + tp->protocol == htons(ETH_P_ALL)) && + (err = tp->classify(skb, tp, res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT if (err != TC_ACT_RECLASSIFY && skb->tc_verd) skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); @@ -1687,11 +1664,11 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, if (verd++ >= MAX_REC_LOOP) { if (net_ratelimit()) - pr_notice("%s: packet reclassify loop" + printk(KERN_NOTICE + "%s: packet reclassify loop" " rule prio %u protocol %02x\n", - tp->q->ops->id, - tp->prio & 0xffff, - ntohs(tp->protocol)); + tp->q->ops->id, + tp->prio & 0xffff, ntohs(tp->protocol)); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); @@ -1784,7 +1761,7 @@ static int __init pktsched_init(void) err = register_pernet_subsys(&psched_net_ops); if (err) { - pr_err("pktsched_init: " + printk(KERN_ERR "pktsched_init: " "cannot initialize per netns operations\n"); return err; } diff --git a/trunk/net/sched/sch_atm.c b/trunk/net/sched/sch_atm.c index 3f08158b8688..943d733409d0 100644 --- a/trunk/net/sched/sch_atm.c +++ b/trunk/net/sched/sch_atm.c @@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) * creation), and one for the reference held when calling delete. */ if (flow->ref < 2) { - pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref); + printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref); return -EINVAL; } if (flow->ref > 2) @@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) } } flow = NULL; -done: - ; + done: + ; } - if (!flow) { + if (!flow) flow = &p->link; - } else { + else { if (flow->vcc) ATM_SKB(skb)->atm_options = flow->vcc->atm_options; /*@@@ looks good ... 
but it's not supposed to work :-) */ @@ -576,7 +576,8 @@ static void atm_tc_destroy(struct Qdisc *sch) list_for_each_entry_safe(flow, tmp, &p->flows, list) { if (flow->ref > 1) - pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref); + printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow, + flow->ref); atm_tc_put(sch, (unsigned long)flow); } tasklet_kill(&p->task); @@ -615,8 +616,9 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, } if (flow->excess) NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); - else + else { NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); + } nla_nest_end(skb, nest); return skb->len; diff --git a/trunk/net/sched/sch_cbq.c b/trunk/net/sched/sch_cbq.c index 24d94c097b35..c80d1c210c5d 100644 --- a/trunk/net/sched/sch_cbq.c +++ b/trunk/net/sched/sch_cbq.c @@ -72,7 +72,8 @@ struct cbq_sched_data; -struct cbq_class { +struct cbq_class +{ struct Qdisc_class_common common; struct cbq_class *next_alive; /* next class with backlog in this priority band */ @@ -138,18 +139,19 @@ struct cbq_class { int refcnt; int filters; - struct cbq_class *defaults[TC_PRIO_MAX + 1]; + struct cbq_class *defaults[TC_PRIO_MAX+1]; }; -struct cbq_sched_data { +struct cbq_sched_data +{ struct Qdisc_class_hash clhash; /* Hash table of all classes */ - int nclasses[TC_CBQ_MAXPRIO + 1]; - unsigned int quanta[TC_CBQ_MAXPRIO + 1]; + int nclasses[TC_CBQ_MAXPRIO+1]; + unsigned quanta[TC_CBQ_MAXPRIO+1]; struct cbq_class link; - unsigned int activemask; - struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes + unsigned activemask; + struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes with backlog */ #ifdef CONFIG_NET_CLS_ACT @@ -160,7 +162,7 @@ struct cbq_sched_data { int tx_len; psched_time_t now; /* Cached timestamp */ psched_time_t now_rt; /* Cached real time */ - unsigned int pmask; + unsigned pmask; struct hrtimer delay_timer; struct qdisc_watchdog watchdog; /* Watchdog timer, @@ -173,9 +175,9 @@ struct cbq_sched_data { }; -#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) +#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) -static inline struct cbq_class * +static __inline__ struct cbq_class * cbq_class_lookup(struct cbq_sched_data *q, u32 classid) { struct Qdisc_class_common *clc; @@ -191,27 +193,25 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid) static struct cbq_class * cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) { - struct cbq_class *cl; + struct cbq_class *cl, *new; - for (cl = this->tparent; cl; cl = cl->tparent) { - struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT]; - - if (new != NULL && new != this) + for (cl = this->tparent; cl; cl = cl->tparent) + if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this) return new; - } + return NULL; } #endif /* Classify packet. The procedure is pretty complicated, but - * it allows us to combine link sharing and priority scheduling - * transparently. - * - * Namely, you can put link sharing rules (f.e. route based) at root of CBQ, - * so that it resolves to split nodes. Then packets are classified - * by logical priority, or a more specific classifier may be attached - * to the split node. + it allows us to combine link sharing and priority scheduling + transparently. + + Namely, you can put link sharing rules (f.e. route based) at root of CBQ, + so that it resolves to split nodes. Then packets are classified + by logical priority, or a more specific classifier may be attached + to the split node. 
*/ static struct cbq_class * @@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) /* * Step 1. If skb->priority points to one of our classes, use it. */ - if (TC_H_MAJ(prio ^ sch->handle) == 0 && + if (TC_H_MAJ(prio^sch->handle) == 0 && (cl = cbq_class_lookup(q, prio)) != NULL) return cl; @@ -243,11 +243,10 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) goto fallback; - cl = (void *)res.class; - if (!cl) { + if ((cl = (void*)res.class) == NULL) { if (TC_H_MAJ(res.classid)) cl = cbq_class_lookup(q, res.classid); - else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) + else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL) cl = defmap[TC_PRIO_BESTEFFORT]; if (cl == NULL || cl->level >= head->level) @@ -283,7 +282,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) * Step 4. No success... */ if (TC_H_MAJ(prio) == 0 && - !(cl = head->defaults[prio & TC_PRIO_MAX]) && + !(cl = head->defaults[prio&TC_PRIO_MAX]) && !(cl = head->defaults[TC_PRIO_BESTEFFORT])) return head; @@ -291,12 +290,12 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) } /* - * A packet has just been enqueued on the empty class. - * cbq_activate_class adds it to the tail of active class list - * of its priority band. + A packet has just been enqueued on the empty class. + cbq_activate_class adds it to the tail of active class list + of its priority band. */ -static inline void cbq_activate_class(struct cbq_class *cl) +static __inline__ void cbq_activate_class(struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(cl->qdisc); int prio = cl->cpriority; @@ -315,9 +314,9 @@ static inline void cbq_activate_class(struct cbq_class *cl) } /* - * Unlink class from active chain. - * Note that this same procedure is done directly in cbq_dequeue* - * during round-robin procedure. + Unlink class from active chain. + Note that this same procedure is done directly in cbq_dequeue* + during round-robin procedure. */ static void cbq_deactivate_class(struct cbq_class *this) @@ -351,7 +350,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) { int toplevel = q->toplevel; - if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { + if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) { psched_time_t now; psched_tdiff_t incr; @@ -364,7 +363,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) q->toplevel = cl->level; return; } - } while ((cl = cl->borrow) != NULL && toplevel > cl->level); + } while ((cl=cl->borrow) != NULL && toplevel > cl->level); } } @@ -391,6 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(skb, cl->q); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; + qdisc_bstats_update(sch, skb); cbq_mark_toplevel(q, cl); if (!cl->next_alive) cbq_activate_class(cl); @@ -418,11 +418,11 @@ static void cbq_ovl_classic(struct cbq_class *cl) delay += cl->offtime; /* - * Class goes to sleep, so that it will have no - * chance to work avgidle. Let's forgive it 8) - * - * BTW cbq-2.0 has a crap in this - * place, apparently they forgot to shift it by cl->ewma_log. + Class goes to sleep, so that it will have no + chance to work avgidle. Let's forgive it 8) + + BTW cbq-2.0 has a crap in this + place, apparently they forgot to shift it by cl->ewma_log. 
*/ if (cl->avgidle < 0) delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); @@ -439,8 +439,8 @@ static void cbq_ovl_classic(struct cbq_class *cl) q->wd_expires = delay; /* Dirty work! We must schedule wakeups based on - * real available rate, rather than leaf rate, - * which may be tiny (even zero). + real available rate, rather than leaf rate, + which may be tiny (even zero). */ if (q->toplevel == TC_CBQ_MAXLEVEL) { struct cbq_class *b; @@ -460,7 +460,7 @@ static void cbq_ovl_classic(struct cbq_class *cl) } /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when - * they go overlimit + they go overlimit */ static void cbq_ovl_rclassic(struct cbq_class *cl) @@ -595,7 +595,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) struct Qdisc *sch = q->watchdog.qdisc; psched_time_t now; psched_tdiff_t delay = 0; - unsigned int pmask; + unsigned pmask; now = psched_get_time(); @@ -624,7 +624,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); } - qdisc_unthrottled(sch); + sch->flags &= ~TCQ_F_THROTTLED; __netif_schedule(qdisc_root(sch)); return HRTIMER_NORESTART; } @@ -649,6 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) ret = qdisc_enqueue(skb, cl->q); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; + qdisc_bstats_update(sch, skb); if (!cl->next_alive) cbq_activate_class(cl); return 0; @@ -664,15 +665,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) #endif /* - * It is mission critical procedure. - * - * We "regenerate" toplevel cutoff, if transmitting class - * has backlog and it is not regulated. It is not part of - * original CBQ description, but looks more reasonable. - * Probably, it is wrong. This question needs further investigation. - */ + It is mission critical procedure. -static inline void + We "regenerate" toplevel cutoff, if transmitting class + has backlog and it is not regulated. It is not part of + original CBQ description, but looks more reasonable. + Probably, it is wrong. This question needs further investigation. +*/ + +static __inline__ void cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, struct cbq_class *borrowed) { @@ -683,7 +684,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, q->toplevel = borrowed->level; return; } - } while ((borrowed = borrowed->borrow) != NULL); + } while ((borrowed=borrowed->borrow) != NULL); } #if 0 /* It is not necessary now. Uncommenting it @@ -711,10 +712,10 @@ cbq_update(struct cbq_sched_data *q) cl->bstats.bytes += len; /* - * (now - last) is total time between packet right edges. - * (last_pktlen/rate) is "virtual" busy time, so that - * - * idle = (now - last) - last_pktlen/rate + (now - last) is total time between packet right edges. + (last_pktlen/rate) is "virtual" busy time, so that + + idle = (now - last) - last_pktlen/rate */ idle = q->now - cl->last; @@ -724,9 +725,9 @@ cbq_update(struct cbq_sched_data *q) idle -= L2T(cl, len); /* true_avgidle := (1-W)*true_avgidle + W*idle, - * where W=2^{-ewma_log}. But cl->avgidle is scaled: - * cl->avgidle == true_avgidle/W, - * hence: + where W=2^{-ewma_log}. But cl->avgidle is scaled: + cl->avgidle == true_avgidle/W, + hence: */ avgidle += idle - (avgidle>>cl->ewma_log); } @@ -740,22 +741,22 @@ cbq_update(struct cbq_sched_data *q) cl->avgidle = avgidle; /* Calculate expected time, when this class - * will be allowed to send. 
- * It will occur, when: - * (1-W)*true_avgidle + W*delay = 0, i.e. - * idle = (1/W - 1)*(-true_avgidle) - * or - * idle = (1 - W)*(-cl->avgidle); + will be allowed to send. + It will occur, when: + (1-W)*true_avgidle + W*delay = 0, i.e. + idle = (1/W - 1)*(-true_avgidle) + or + idle = (1 - W)*(-cl->avgidle); */ idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); /* - * That is not all. - * To maintain the rate allocated to the class, - * we add to undertime virtual clock, - * necessary to complete transmitted packet. - * (len/phys_bandwidth has been already passed - * to the moment of cbq_update) + That is not all. + To maintain the rate allocated to the class, + we add to undertime virtual clock, + necessary to complete transmitted packet. + (len/phys_bandwidth has been already passed + to the moment of cbq_update) */ idle -= L2T(&q->link, len); @@ -777,7 +778,7 @@ cbq_update(struct cbq_sched_data *q) cbq_update_toplevel(q, this, q->tx_borrowed); } -static inline struct cbq_class * +static __inline__ struct cbq_class * cbq_under_limit(struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(cl->qdisc); @@ -793,17 +794,16 @@ cbq_under_limit(struct cbq_class *cl) do { /* It is very suspicious place. Now overlimit - * action is generated for not bounded classes - * only if link is completely congested. - * Though it is in agree with ancestor-only paradigm, - * it looks very stupid. Particularly, - * it means that this chunk of code will either - * never be called or result in strong amplification - * of burstiness. Dangerous, silly, and, however, - * no another solution exists. + action is generated for not bounded classes + only if link is completely congested. + Though it is in agree with ancestor-only paradigm, + it looks very stupid. Particularly, + it means that this chunk of code will either + never be called or result in strong amplification + of burstiness. Dangerous, silly, and, however, + no another solution exists. */ - cl = cl->borrow; - if (!cl) { + if ((cl = cl->borrow) == NULL) { this_cl->qstats.overlimits++; this_cl->overlimit(this_cl); return NULL; @@ -816,7 +816,7 @@ cbq_under_limit(struct cbq_class *cl) return cl; } -static inline struct sk_buff * +static __inline__ struct sk_buff * cbq_dequeue_prio(struct Qdisc *sch, int prio) { struct cbq_sched_data *q = qdisc_priv(sch); @@ -840,7 +840,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) if (cl->deficit <= 0) { /* Class exhausted its allotment per - * this round. Switch to the next one. + this round. Switch to the next one. */ deficit = 1; cl->deficit += cl->quantum; @@ -850,8 +850,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) skb = cl->q->dequeue(cl->q); /* Class did not give us any skb :-( - * It could occur even if cl->q->q.qlen != 0 - * f.e. if cl->q == "tbf" + It could occur even if cl->q->q.qlen != 0 + f.e. if cl->q == "tbf" */ if (skb == NULL) goto skip_class; @@ -880,7 +880,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) skip_class: if (cl->q->q.qlen == 0 || prio != cl->cpriority) { /* Class is empty or penalized. - * Unlink it from active chain. + Unlink it from active chain. 
*/ cl_prev->next_alive = cl->next_alive; cl->next_alive = NULL; @@ -919,14 +919,14 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) return NULL; } -static inline struct sk_buff * +static __inline__ struct sk_buff * cbq_dequeue_1(struct Qdisc *sch) { struct cbq_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; - unsigned int activemask; + unsigned activemask; - activemask = q->activemask & 0xFF; + activemask = q->activemask&0xFF; while (activemask) { int prio = ffz(~activemask); activemask &= ~(1<tx_class) { psched_tdiff_t incr2; /* Time integrator. We calculate EOS time - * by adding expected packet transmission time. - * If real time is greater, we warp artificial clock, - * so that: - * - * cbq_time = max(real_time, work); + by adding expected packet transmission time. + If real time is greater, we warp artificial clock, + so that: + + cbq_time = max(real_time, work); */ incr2 = L2T(&q->link, q->tx_len); q->now += incr2; @@ -971,29 +971,28 @@ cbq_dequeue(struct Qdisc *sch) skb = cbq_dequeue_1(sch); if (skb) { - qdisc_bstats_update(sch, skb); sch->q.qlen--; - qdisc_unthrottled(sch); + sch->flags &= ~TCQ_F_THROTTLED; return skb; } /* All the classes are overlimit. - * - * It is possible, if: - * - * 1. Scheduler is empty. - * 2. Toplevel cutoff inhibited borrowing. - * 3. Root class is overlimit. - * - * Reset 2d and 3d conditions and retry. - * - * Note, that NS and cbq-2.0 are buggy, peeking - * an arbitrary class is appropriate for ancestor-only - * sharing, but not for toplevel algorithm. - * - * Our version is better, but slower, because it requires - * two passes, but it is unavoidable with top-level sharing. - */ + + It is possible, if: + + 1. Scheduler is empty. + 2. Toplevel cutoff inhibited borrowing. + 3. Root class is overlimit. + + Reset 2d and 3d conditions and retry. + + Note, that NS and cbq-2.0 are buggy, peeking + an arbitrary class is appropriate for ancestor-only + sharing, but not for toplevel algorithm. + + Our version is better, but slower, because it requires + two passes, but it is unavoidable with top-level sharing. + */ if (q->toplevel == TC_CBQ_MAXLEVEL && q->link.undertime == PSCHED_PASTPERFECT) @@ -1004,8 +1003,7 @@ cbq_dequeue(struct Qdisc *sch) } /* No packets in scheduler or nobody wants to give them to us :-( - * Sigh... start watchdog timer in the last case. - */ + Sigh... start watchdog timer in the last case. */ if (sch->q.qlen) { sch->qstats.overlimits++; @@ -1027,14 +1025,13 @@ static void cbq_adjust_levels(struct cbq_class *this) int level = 0; struct cbq_class *cl; - cl = this->children; - if (cl) { + if ((cl = this->children) != NULL) { do { if (cl->level > level) level = cl->level; } while ((cl = cl->sibling) != this->children); } - this->level = level + 1; + this->level = level+1; } while ((this = this->tparent) != NULL); } @@ -1050,15 +1047,14 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) for (h = 0; h < q->clhash.hashsize; h++) { hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { /* BUGGGG... Beware! This expression suffer of - * arithmetic overflows! + arithmetic overflows! 
*/ if (cl->priority == prio) { cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ q->quanta[prio]; } if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) { - pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n", - cl->common.classid, cl->quantum); + printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum); cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; } } @@ -1069,18 +1065,18 @@ static void cbq_sync_defmap(struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(cl->qdisc); struct cbq_class *split = cl->split; - unsigned int h; + unsigned h; int i; if (split == NULL) return; - for (i = 0; i <= TC_PRIO_MAX; i++) { - if (split->defaults[i] == cl && !(cl->defmap & (1<defaults[i] == cl && !(cl->defmap&(1<defaults[i] = NULL; } - for (i = 0; i <= TC_PRIO_MAX; i++) { + for (i=0; i<=TC_PRIO_MAX; i++) { int level = split->level; if (split->defaults[i]) @@ -1093,7 +1089,7 @@ static void cbq_sync_defmap(struct cbq_class *cl) hlist_for_each_entry(c, n, &q->clhash.hash[h], common.hnode) { if (c->split == split && c->level < level && - c->defmap & (1<defmap&(1<defaults[i] = c; level = c->level; } @@ -1107,8 +1103,7 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma struct cbq_class *split = NULL; if (splitid == 0) { - split = cl->split; - if (!split) + if ((split = cl->split) == NULL) return; splitid = split->common.classid; } @@ -1126,9 +1121,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma cl->defmap = 0; cbq_sync_defmap(cl); cl->split = split; - cl->defmap = def & mask; + cl->defmap = def&mask; } else - cl->defmap = (cl->defmap & ~mask) | (def & mask); + cl->defmap = (cl->defmap&~mask)|(def&mask); cbq_sync_defmap(cl); } @@ -1141,7 +1136,7 @@ static void cbq_unlink_class(struct cbq_class *this) qdisc_class_hash_remove(&q->clhash, &this->common); if (this->tparent) { - clp = &this->sibling; + clp=&this->sibling; cl = *clp; do { if (cl == this) { @@ -1180,7 +1175,7 @@ static void cbq_link_class(struct cbq_class *this) } } -static unsigned int cbq_drop(struct Qdisc *sch) +static unsigned int cbq_drop(struct Qdisc* sch) { struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl, *cl_head; @@ -1188,8 +1183,7 @@ static unsigned int cbq_drop(struct Qdisc *sch) unsigned int len; for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) { - cl_head = q->active[prio]; - if (!cl_head) + if ((cl_head = q->active[prio]) == NULL) continue; cl = cl_head; @@ -1206,13 +1200,13 @@ static unsigned int cbq_drop(struct Qdisc *sch) } static void -cbq_reset(struct Qdisc *sch) +cbq_reset(struct Qdisc* sch) { struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl; struct hlist_node *n; int prio; - unsigned int h; + unsigned h; q->activemask = 0; q->pmask = 0; @@ -1244,21 +1238,21 @@ cbq_reset(struct Qdisc *sch) static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) { - if (lss->change & TCF_CBQ_LSS_FLAGS) { - cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; - cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; + if (lss->change&TCF_CBQ_LSS_FLAGS) { + cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; + cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? 
NULL : cl->tparent; } - if (lss->change & TCF_CBQ_LSS_EWMA) + if (lss->change&TCF_CBQ_LSS_EWMA) cl->ewma_log = lss->ewma_log; - if (lss->change & TCF_CBQ_LSS_AVPKT) + if (lss->change&TCF_CBQ_LSS_AVPKT) cl->avpkt = lss->avpkt; - if (lss->change & TCF_CBQ_LSS_MINIDLE) + if (lss->change&TCF_CBQ_LSS_MINIDLE) cl->minidle = -(long)lss->minidle; - if (lss->change & TCF_CBQ_LSS_MAXIDLE) { + if (lss->change&TCF_CBQ_LSS_MAXIDLE) { cl->maxidle = lss->maxidle; cl->avgidle = lss->maxidle; } - if (lss->change & TCF_CBQ_LSS_OFFTIME) + if (lss->change&TCF_CBQ_LSS_OFFTIME) cl->offtime = lss->offtime; return 0; } @@ -1286,10 +1280,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr) if (wrr->weight) cl->weight = wrr->weight; if (wrr->priority) { - cl->priority = wrr->priority - 1; + cl->priority = wrr->priority-1; cl->cpriority = cl->priority; if (cl->priority >= cl->priority2) - cl->priority2 = TC_CBQ_MAXPRIO - 1; + cl->priority2 = TC_CBQ_MAXPRIO-1; } cbq_addprio(q, cl); @@ -1306,10 +1300,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl) cl->overlimit = cbq_ovl_delay; break; case TC_CBQ_OVL_LOWPRIO: - if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO || - ovl->priority2 - 1 <= cl->priority) + if (ovl->priority2-1 >= TC_CBQ_MAXPRIO || + ovl->priority2-1 <= cl->priority) return -EINVAL; - cl->priority2 = ovl->priority2 - 1; + cl->priority2 = ovl->priority2-1; cl->overlimit = cbq_ovl_lowprio; break; case TC_CBQ_OVL_DROP: @@ -1388,9 +1382,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) if (!q->link.q) q->link.q = &noop_qdisc; - q->link.priority = TC_CBQ_MAXPRIO - 1; - q->link.priority2 = TC_CBQ_MAXPRIO - 1; - q->link.cpriority = TC_CBQ_MAXPRIO - 1; + q->link.priority = TC_CBQ_MAXPRIO-1; + q->link.priority2 = TC_CBQ_MAXPRIO-1; + q->link.cpriority = TC_CBQ_MAXPRIO-1; q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; q->link.overlimit = cbq_ovl_classic; q->link.allot = psched_mtu(qdisc_dev(sch)); @@ -1421,7 +1415,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) return err; } -static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) +static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); @@ -1433,7 +1427,7 @@ static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) return -1; } -static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) +static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_lssopt opt; @@ -1458,15 +1452,15 @@ static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) return -1; } -static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) +static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_wrropt opt; opt.flags = 0; opt.allot = cl->allot; - opt.priority = cl->priority + 1; - opt.cpriority = cl->cpriority + 1; + opt.priority = cl->priority+1; + opt.cpriority = cl->cpriority+1; opt.weight = cl->weight; NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); return skb->len; @@ -1476,13 +1470,13 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) return -1; } -static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) +static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_ovl opt; opt.strategy = cl->ovl_strategy; - opt.priority2 = cl->priority2 + 1; + 
opt.priority2 = cl->priority2+1; opt.pad = 0; opt.penalty = cl->penalty; NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); @@ -1493,7 +1487,7 @@ static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) return -1; } -static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) +static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_fopt opt; @@ -1512,7 +1506,7 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) } #ifdef CONFIG_NET_CLS_ACT -static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) +static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_police opt; @@ -1576,7 +1570,7 @@ static int cbq_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) { - struct cbq_class *cl = (struct cbq_class *)arg; + struct cbq_class *cl = (struct cbq_class*)arg; struct nlattr *nest; if (cl->tparent) @@ -1604,7 +1598,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct cbq_sched_data *q = qdisc_priv(sch); - struct cbq_class *cl = (struct cbq_class *)arg; + struct cbq_class *cl = (struct cbq_class*)arg; cl->qstats.qlen = cl->q->q.qlen; cl->xstats.avgidle = cl->avgidle; @@ -1624,7 +1618,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { - struct cbq_class *cl = (struct cbq_class *)arg; + struct cbq_class *cl = (struct cbq_class*)arg; if (new == NULL) { new = qdisc_create_dflt(sch->dev_queue, @@ -1647,9 +1641,10 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, return 0; } -static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg) +static struct Qdisc * +cbq_leaf(struct Qdisc *sch, unsigned long arg) { - struct cbq_class *cl = (struct cbq_class *)arg; + struct cbq_class *cl = (struct cbq_class*)arg; return cl->q; } @@ -1688,12 +1683,13 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) kfree(cl); } -static void cbq_destroy(struct Qdisc *sch) +static void +cbq_destroy(struct Qdisc* sch) { struct cbq_sched_data *q = qdisc_priv(sch); struct hlist_node *n, *next; struct cbq_class *cl; - unsigned int h; + unsigned h; #ifdef CONFIG_NET_CLS_ACT q->rx_class = NULL; @@ -1717,7 +1713,7 @@ static void cbq_destroy(struct Qdisc *sch) static void cbq_put(struct Qdisc *sch, unsigned long arg) { - struct cbq_class *cl = (struct cbq_class *)arg; + struct cbq_class *cl = (struct cbq_class*)arg; if (--cl->refcnt == 0) { #ifdef CONFIG_NET_CLS_ACT @@ -1740,7 +1736,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t { int err; struct cbq_sched_data *q = qdisc_priv(sch); - struct cbq_class *cl = (struct cbq_class *)*arg; + struct cbq_class *cl = (struct cbq_class*)*arg; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_CBQ_MAX + 1]; struct cbq_class *parent; @@ -1832,14 +1828,13 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t if (classid) { err = -EINVAL; - if (TC_H_MAJ(classid ^ sch->handle) || - cbq_class_lookup(q, classid)) + if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid)) goto failure; } else { int i; - classid = TC_H_MAKE(sch->handle, 0x8000); + classid = TC_H_MAKE(sch->handle,0x8000); - for (i = 0; i < 0x8000; i++) { + for (i=0; i<0x8000; i++) { if 
(++q->hgenerator >= 0x8000) q->hgenerator = 1; if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) @@ -1896,11 +1891,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t cl->minidle = -0x7FFFFFFF; cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); - if (cl->ewma_log == 0) + if (cl->ewma_log==0) cl->ewma_log = q->link.ewma_log; - if (cl->maxidle == 0) + if (cl->maxidle==0) cl->maxidle = q->link.maxidle; - if (cl->avpkt == 0) + if (cl->avpkt==0) cl->avpkt = q->link.avpkt; cl->overlimit = cbq_ovl_classic; if (tb[TCA_CBQ_OVL_STRATEGY]) @@ -1926,7 +1921,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t static int cbq_delete(struct Qdisc *sch, unsigned long arg) { struct cbq_sched_data *q = qdisc_priv(sch); - struct cbq_class *cl = (struct cbq_class *)arg; + struct cbq_class *cl = (struct cbq_class*)arg; unsigned int qlen; if (cl->filters || cl->children || cl == &q->link) @@ -1984,7 +1979,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { struct cbq_sched_data *q = qdisc_priv(sch); - struct cbq_class *p = (struct cbq_class *)parent; + struct cbq_class *p = (struct cbq_class*)parent; struct cbq_class *cl = cbq_class_lookup(q, classid); if (cl) { @@ -1998,7 +1993,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent, static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg) { - struct cbq_class *cl = (struct cbq_class *)arg; + struct cbq_class *cl = (struct cbq_class*)arg; cl->filters--; } @@ -2008,7 +2003,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl; struct hlist_node *n; - unsigned int h; + unsigned h; if (arg->stop) return; diff --git a/trunk/net/sched/sch_drr.c b/trunk/net/sched/sch_drr.c index 6b7fe4a84f13..de55e642eafc 100644 --- a/trunk/net/sched/sch_drr.c +++ b/trunk/net/sched/sch_drr.c @@ -376,6 +376,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) } bstats_update(&cl->bstats, skb); + qdisc_bstats_update(sch, skb); sch->q.qlen++; return err; @@ -402,7 +403,6 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) skb = qdisc_dequeue_peeked(cl->qdisc); if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); - qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } diff --git a/trunk/net/sched/sch_dsmark.c b/trunk/net/sched/sch_dsmark.c index 2c790204d042..60f4bdd4408e 100644 --- a/trunk/net/sched/sch_dsmark.c +++ b/trunk/net/sched/sch_dsmark.c @@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, mask = nla_get_u8(tb[TCA_DSMARK_MASK]); if (tb[TCA_DSMARK_VALUE]) - p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); + p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); if (tb[TCA_DSMARK_MASK]) - p->mask[*arg - 1] = mask; + p->mask[*arg-1] = mask; err = 0; @@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg) if (!dsmark_valid_index(p, arg)) return -EINVAL; - p->mask[arg - 1] = 0xff; - p->value[arg - 1] = 0; + p->mask[arg-1] = 0xff; + p->value[arg-1] = 0; return 0; } @@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker) if (p->mask[i] == 0xff && !p->value[i]) goto ignore; if (walker->count >= walker->skip) { - if (walker->fn(sch, i + 1, walker) < 0) { + if (walker->fn(sch, i+1, walker) < 0) { walker->stop = 1; break; } @@ -260,6 +260,7 @@ static int dsmark_enqueue(struct sk_buff 
*skb, struct Qdisc *sch) return err; } + qdisc_bstats_update(sch, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; @@ -282,7 +283,6 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) if (skb == NULL) return NULL; - qdisc_bstats_update(sch, skb); sch->q.qlen--; index = skb->tc_index & (p->indices - 1); @@ -304,8 +304,9 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) * and don't need yet another qdisc as a bypass. */ if (p->mask[index] != 0xff || p->value[index]) - pr_warning("dsmark_dequeue: unsupported protocol %d\n", - ntohs(skb->protocol)); + printk(KERN_WARNING + "dsmark_dequeue: unsupported protocol %d\n", + ntohs(skb->protocol)); break; } @@ -423,14 +424,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, if (!dsmark_valid_index(p, cl)) return -EINVAL; - tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1); + tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1); tcm->tcm_info = p->q->handle; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]); - NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]); + NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); + NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); return nla_nest_end(skb, opts); diff --git a/trunk/net/sched/sch_fifo.c b/trunk/net/sched/sch_fifo.c index be33f9ddf9dd..aa4d6337e43c 100644 --- a/trunk/net/sched/sch_fifo.c +++ b/trunk/net/sched/sch_fifo.c @@ -19,11 +19,12 @@ /* 1 band FIFO pseudo-"scheduler" */ -struct fifo_sched_data { +struct fifo_sched_data +{ u32 limit; }; -static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) { struct fifo_sched_data *q = qdisc_priv(sch); @@ -33,7 +34,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) return qdisc_reshape_fail(skb, sch); } -static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) { struct fifo_sched_data *q = qdisc_priv(sch); @@ -43,16 +44,19 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) return qdisc_reshape_fail(skb, sch); } -static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch) { + struct sk_buff *skb_head; struct fifo_sched_data *q = qdisc_priv(sch); if (likely(skb_queue_len(&sch->q) < q->limit)) return qdisc_enqueue_tail(skb, sch); /* queue full, remove one skb to fulfill the limit */ - __qdisc_queue_drop_head(sch, &sch->q); + skb_head = qdisc_dequeue_head(sch); sch->qstats.drops++; + kfree_skb(skb_head); + qdisc_enqueue_tail(skb, sch); return NET_XMIT_CN; @@ -61,13 +65,11 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) static int fifo_init(struct Qdisc *sch, struct nlattr *opt) { struct fifo_sched_data *q = qdisc_priv(sch); - bool bypass; - bool is_bfifo = sch->ops == &bfifo_qdisc_ops; if (opt == NULL) { u32 limit = qdisc_dev(sch)->tx_queue_len ? 
: 1; - if (is_bfifo) + if (sch->ops == &bfifo_qdisc_ops) limit *= psched_mtu(qdisc_dev(sch)); q->limit = limit; @@ -80,15 +82,6 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt) q->limit = ctl->limit; } - if (is_bfifo) - bypass = q->limit >= psched_mtu(qdisc_dev(sch)); - else - bypass = q->limit >= 1; - - if (bypass) - sch->flags |= TCQ_F_CAN_BYPASS; - else - sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; } diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c index 0da09d508737..34dc598440a2 100644 --- a/trunk/net/sched/sch_generic.c +++ b/trunk/net/sched/sch_generic.c @@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, */ kfree_skb(skb); if (net_ratelimit()) - pr_warning("Dead loop on netdevice %s, fix it urgently!\n", - dev_queue->dev->name); + printk(KERN_WARNING "Dead loop on netdevice %s, " + "fix it urgently!\n", dev_queue->dev->name); ret = qdisc_qlen(q); } else { /* @@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, } else { /* Driver returned NETDEV_TX_BUSY - requeue skb */ if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) - pr_warning("BUG %s code %d qlen %d\n", - dev->name, ret, q->q.qlen); + printk(KERN_WARNING "BUG %s code %d qlen %d\n", + dev->name, ret, q->q.qlen); ret = dev_requeue_skb(skb, q); } @@ -412,9 +412,8 @@ static struct Qdisc noqueue_qdisc = { }; -static const u8 prio2band[TC_PRIO_MAX + 1] = { - 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 -}; +static const u8 prio2band[TC_PRIO_MAX+1] = + { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; /* 3-band FIFO queue: old style, but should be a bit faster than generic prio+fifo combination. @@ -446,7 +445,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, return priv->q + band; } -static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) +static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) { if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { int band = prio2band[skb->priority & TC_PRIO_MAX]; @@ -461,7 +460,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) return qdisc_drop(skb, qdisc); } -static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) +static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) { struct pfifo_fast_priv *priv = qdisc_priv(qdisc); int band = bitmap2band[priv->bitmap]; @@ -480,7 +479,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) return NULL; } -static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) +static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) { struct pfifo_fast_priv *priv = qdisc_priv(qdisc); int band = bitmap2band[priv->bitmap]; @@ -494,7 +493,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) return NULL; } -static void pfifo_fast_reset(struct Qdisc *qdisc) +static void pfifo_fast_reset(struct Qdisc* qdisc) { int prio; struct pfifo_fast_priv *priv = qdisc_priv(qdisc); @@ -511,7 +510,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) { struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; - memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); + memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; @@ -527,8 +526,6 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) skb_queue_head_init(band2list(priv, prio)); - /* Can by-pass the queue discipline */ - qdisc->flags |= TCQ_F_CAN_BYPASS; 
return 0; } @@ -543,7 +540,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = { .dump = pfifo_fast_dump, .owner = THIS_MODULE, }; -EXPORT_SYMBOL(pfifo_fast_ops); struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, struct Qdisc_ops *ops) @@ -634,7 +630,7 @@ void qdisc_destroy(struct Qdisc *qdisc) #ifdef CONFIG_NET_SCHED qdisc_list_del(qdisc); - qdisc_put_stab(rtnl_dereference(qdisc->stab)); + qdisc_put_stab(qdisc->stab); #endif gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); if (ops->reset) @@ -678,21 +674,25 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, return oqdisc; } -EXPORT_SYMBOL(dev_graft_qdisc); static void attach_one_default_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_unused) { - struct Qdisc *qdisc = &noqueue_qdisc; + struct Qdisc *qdisc; if (dev->tx_queue_len) { qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops, TC_H_ROOT); if (!qdisc) { - netdev_info(dev, "activation failed\n"); + printk(KERN_INFO "%s: activation failed\n", dev->name); return; } + + /* Can by-pass the queue discipline for default qdisc */ + qdisc->flags |= TCQ_F_CAN_BYPASS; + } else { + qdisc = &noqueue_qdisc; } dev_queue->qdisc_sleeping = qdisc; } @@ -761,7 +761,6 @@ void dev_activate(struct net_device *dev) dev_watchdog_up(dev); } } -EXPORT_SYMBOL(dev_activate); static void dev_deactivate_queue(struct net_device *dev, struct netdev_queue *dev_queue, @@ -841,7 +840,6 @@ void dev_deactivate(struct net_device *dev) list_add(&dev->unreg_list, &single); dev_deactivate_many(&single); } -EXPORT_SYMBOL(dev_deactivate); static void dev_init_scheduler_queue(struct net_device *dev, struct netdev_queue *dev_queue, diff --git a/trunk/net/sched/sch_gred.c b/trunk/net/sched/sch_gred.c index b9493a09a870..51dcc2aa5c92 100644 --- a/trunk/net/sched/sch_gred.c +++ b/trunk/net/sched/sch_gred.c @@ -32,7 +32,8 @@ struct gred_sched_data; struct gred_sched; -struct gred_sched_data { +struct gred_sched_data +{ u32 limit; /* HARD maximal queue length */ u32 DP; /* the drop pramaters */ u32 bytesin; /* bytes seen on virtualQ so far*/ @@ -49,7 +50,8 @@ enum { GRED_RIO_MODE, }; -struct gred_sched { +struct gred_sched +{ struct gred_sched_data *tab[MAX_DPs]; unsigned long flags; u32 red_flags; @@ -148,18 +150,17 @@ static inline int gred_use_harddrop(struct gred_sched *t) return t->red_flags & TC_RED_HARDDROP; } -static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) { - struct gred_sched_data *q = NULL; - struct gred_sched *t = qdisc_priv(sch); + struct gred_sched_data *q=NULL; + struct gred_sched *t= qdisc_priv(sch); unsigned long qavg = 0; u16 dp = tc_index_to_dp(skb); - if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { + if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { dp = t->def; - q = t->tab[dp]; - if (!q) { + if ((q = t->tab[dp]) == NULL) { /* Pass through packets not assigned to a DP * if no default DP has been configured. This * allows for DP flows to be left untouched. 
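Editor's note: the gred_enqueue() hunk above picks a virtual queue (DP) from the packet's tc_index, falls back to the default DP, and passes the packet through when no default is configured. A minimal standalone sketch of that selection follows; struct vq, MAX_DPS and select_vq() are illustrative stand-ins, not the kernel's gred_sched_data handling.

    /* Minimal sketch of GRED virtual-queue (DP) selection (illustrative only). */
    #include <stdio.h>
    #include <stddef.h>

    #define MAX_DPS 16

    struct vq { int configured; };

    /* Return the virtual queue for a packet whose tc_index maps to "dp",
     * or NULL when the packet should be passed through untouched. */
    static struct vq *select_vq(struct vq *tab[MAX_DPS], unsigned int dp,
                                unsigned int ndps, unsigned int def)
    {
        struct vq *q;

        if (dp >= ndps || (q = tab[dp]) == NULL) {
            /* unknown DP: fall back to the default DP, if configured */
            dp = def;
            if ((q = tab[dp]) == NULL)
                return NULL; /* no default DP: leave the flow untouched */
        }
        return q;
    }

    int main(void)
    {
        struct vq q0 = { 1 };
        struct vq *tab[MAX_DPS] = { &q0 }; /* only DP 0 is configured */

        printf("dp=0 -> %p\n", (void *)select_vq(tab, 0, 4, 0));
        printf("dp=9 -> %p (falls back to DP 0)\n",
               (void *)select_vq(tab, 9, 4, 0));
        return 0;
    }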
@@ -182,7 +183,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) for (i = 0; i < t->DPs; i++) { if (t->tab[i] && t->tab[i]->prio < q->prio && !red_is_idling(&t->tab[i]->parms)) - qavg += t->tab[i]->parms.qavg; + qavg +=t->tab[i]->parms.qavg; } } @@ -202,28 +203,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) gred_store_wred_set(t, q); switch (red_action(&q->parms, q->parms.qavg + qavg)) { - case RED_DONT_MARK: - break; - - case RED_PROB_MARK: - sch->qstats.overlimits++; - if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { - q->stats.prob_drop++; - goto congestion_drop; - } - - q->stats.prob_mark++; - break; - - case RED_HARD_MARK: - sch->qstats.overlimits++; - if (gred_use_harddrop(t) || !gred_use_ecn(t) || - !INET_ECN_set_ce(skb)) { - q->stats.forced_drop++; - goto congestion_drop; - } - q->stats.forced_mark++; - break; + case RED_DONT_MARK: + break; + + case RED_PROB_MARK: + sch->qstats.overlimits++; + if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { + q->stats.prob_drop++; + goto congestion_drop; + } + + q->stats.prob_mark++; + break; + + case RED_HARD_MARK: + sch->qstats.overlimits++; + if (gred_use_harddrop(t) || !gred_use_ecn(t) || + !INET_ECN_set_ce(skb)) { + q->stats.forced_drop++; + goto congestion_drop; + } + q->stats.forced_mark++; + break; } if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { @@ -240,7 +241,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_CN; } -static struct sk_buff *gred_dequeue(struct Qdisc *sch) +static struct sk_buff *gred_dequeue(struct Qdisc* sch) { struct sk_buff *skb; struct gred_sched *t = qdisc_priv(sch); @@ -253,9 +254,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (net_ratelimit()) - pr_warning("GRED: Unable to relocate VQ 0x%x " - "after dequeue, screwing up " - "backlog.\n", tc_index_to_dp(skb)); + printk(KERN_WARNING "GRED: Unable to relocate " + "VQ 0x%x after dequeue, screwing up " + "backlog.\n", tc_index_to_dp(skb)); } else { q->backlog -= qdisc_pkt_len(skb); @@ -272,7 +273,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) return NULL; } -static unsigned int gred_drop(struct Qdisc *sch) +static unsigned int gred_drop(struct Qdisc* sch) { struct sk_buff *skb; struct gred_sched *t = qdisc_priv(sch); @@ -285,9 +286,9 @@ static unsigned int gred_drop(struct Qdisc *sch) if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (net_ratelimit()) - pr_warning("GRED: Unable to relocate VQ 0x%x " - "while dropping, screwing up " - "backlog.\n", tc_index_to_dp(skb)); + printk(KERN_WARNING "GRED: Unable to relocate " + "VQ 0x%x while dropping, screwing up " + "backlog.\n", tc_index_to_dp(skb)); } else { q->backlog -= len; q->stats.other++; @@ -307,7 +308,7 @@ static unsigned int gred_drop(struct Qdisc *sch) } -static void gred_reset(struct Qdisc *sch) +static void gred_reset(struct Qdisc* sch) { int i; struct gred_sched *t = qdisc_priv(sch); @@ -368,8 +369,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) for (i = table->DPs; i < MAX_DPs; i++) { if (table->tab[i]) { - pr_warning("GRED: Warning: Destroying " - "shadowed VQ 0x%x\n", i); + printk(KERN_WARNING "GRED: Warning: Destroying " + "shadowed VQ 0x%x\n", i); gred_destroy_vq(table->tab[i]); table->tab[i] = NULL; } diff --git a/trunk/net/sched/sch_hfsc.c b/trunk/net/sched/sch_hfsc.c index 6488e6425652..2e45791d4f6c 100644 --- a/trunk/net/sched/sch_hfsc.c +++ b/trunk/net/sched/sch_hfsc.c @@ -81,7 +81,8 @@ * that are expensive on 
32-bit architectures. */ -struct internal_sc { +struct internal_sc +{ u64 sm1; /* scaled slope of the 1st segment */ u64 ism1; /* scaled inverse-slope of the 1st segment */ u64 dx; /* the x-projection of the 1st segment */ @@ -91,7 +92,8 @@ struct internal_sc { }; /* runtime service curve */ -struct runtime_sc { +struct runtime_sc +{ u64 x; /* current starting position on x-axis */ u64 y; /* current starting position on y-axis */ u64 sm1; /* scaled slope of the 1st segment */ @@ -102,13 +104,15 @@ struct runtime_sc { u64 ism2; /* scaled inverse-slope of the 2nd segment */ }; -enum hfsc_class_flags { +enum hfsc_class_flags +{ HFSC_RSC = 0x1, HFSC_FSC = 0x2, HFSC_USC = 0x4 }; -struct hfsc_class { +struct hfsc_class +{ struct Qdisc_class_common cl_common; unsigned int refcnt; /* usage count */ @@ -136,8 +140,8 @@ struct hfsc_class { u64 cl_cumul; /* cumulative work in bytes done by real-time criteria */ - u64 cl_d; /* deadline*/ - u64 cl_e; /* eligible time */ + u64 cl_d; /* deadline*/ + u64 cl_e; /* eligible time */ u64 cl_vt; /* virtual time */ u64 cl_f; /* time when this class will fit for link-sharing, max(myf, cfmin) */ @@ -172,7 +176,8 @@ struct hfsc_class { unsigned long cl_nactive; /* number of active children */ }; -struct hfsc_sched { +struct hfsc_sched +{ u16 defcls; /* default class id */ struct hfsc_class root; /* root class */ struct Qdisc_class_hash clhash; /* class hash */ @@ -688,7 +693,7 @@ init_vf(struct hfsc_class *cl, unsigned int len) if (go_active) { n = rb_last(&cl->cl_parent->vt_tree); if (n != NULL) { - max_cl = rb_entry(n, struct hfsc_class, vt_node); + max_cl = rb_entry(n, struct hfsc_class,vt_node); /* * set vt to the average of the min and max * classes. if the parent's period didn't @@ -1172,10 +1177,8 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return NULL; } #endif - cl = (struct hfsc_class *)res.class; - if (!cl) { - cl = hfsc_find_class(res.classid, sch); - if (!cl) + if ((cl = (struct hfsc_class *)res.class) == NULL) { + if ((cl = hfsc_find_class(res.classid, sch)) == NULL) break; /* filter selected invalid classid */ if (cl->level >= head->level) break; /* filter may only point downwards */ @@ -1313,7 +1316,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc) return -1; } -static int +static inline int hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) { if ((cl->cl_flags & HFSC_RSC) && @@ -1417,8 +1420,7 @@ hfsc_schedule_watchdog(struct Qdisc *sch) struct hfsc_class *cl; u64 next_time = 0; - cl = eltree_get_minel(q); - if (cl) + if ((cl = eltree_get_minel(q)) != NULL) next_time = cl->cl_e; if (q->root.cl_cfmin != 0) { if (next_time == 0 || next_time > q->root.cl_cfmin) @@ -1598,6 +1600,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) set_active(cl, qdisc_pkt_len(skb)); bstats_update(&cl->bstats, skb); + qdisc_bstats_update(sch, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; @@ -1623,8 +1626,7 @@ hfsc_dequeue(struct Qdisc *sch) * find the class with the minimum deadline among * the eligible classes. 
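Editor's note: the comment above describes HFSC's real-time criterion: among classes whose eligible time has passed, dequeue the one with the smallest deadline. The kernel keeps eligible classes in an rb-tree (eltree_get_mindl()); the sketch below shows the same selection with a plain linear scan over illustrative rt_class records and is not kernel code.

    /* Sketch of the real-time pick: minimum deadline among eligible classes. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct rt_class {
        uint64_t e; /* eligible time */
        uint64_t d; /* deadline */
    };

    static struct rt_class *min_deadline_eligible(struct rt_class *cls,
                                                  size_t n, uint64_t cur_time)
    {
        struct rt_class *best = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            if (cls[i].e > cur_time)
                continue; /* not eligible yet */
            if (best == NULL || cls[i].d < best->d)
                best = &cls[i];
        }
        return best; /* NULL: nothing eligible, fall back to link-sharing */
    }

    int main(void)
    {
        struct rt_class cls[] = { { 10, 100 }, { 5, 80 }, { 50, 20 } };
        struct rt_class *cl = min_deadline_eligible(cls, 3, 30);

        printf("deadline %llu wins\n",
               (unsigned long long)(cl ? cl->d : 0)); /* 80 */
        return 0;
    }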
*/ - cl = eltree_get_mindl(q, cur_time); - if (cl) { + if ((cl = eltree_get_mindl(q, cur_time)) != NULL) { realtime = 1; } else { /* @@ -1663,8 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch) set_passive(cl); } - qdisc_unthrottled(sch); - qdisc_bstats_update(sch, skb); + sch->flags &= ~TCQ_F_THROTTLED; sch->q.qlen--; return skb; diff --git a/trunk/net/sched/sch_htb.c b/trunk/net/sched/sch_htb.c index e1429a85091f..984c1b0c6836 100644 --- a/trunk/net/sched/sch_htb.c +++ b/trunk/net/sched/sch_htb.c @@ -99,10 +99,9 @@ struct htb_class { struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */ struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */ /* When class changes from state 1->2 and disconnects from - * parent's feed then we lost ptr value and start from the - * first child again. Here we store classid of the - * last valid ptr (used when ptr is NULL). - */ + parent's feed then we lost ptr value and start from the + first child again. Here we store classid of the + last valid ptr (used when ptr is NULL). */ u32 last_ptr_id[TC_HTB_NUMPRIO]; } inner; } un; @@ -186,7 +185,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull * then finish and return direct queue. */ -#define HTB_DIRECT ((struct htb_class *)-1L) +#define HTB_DIRECT (struct htb_class*)-1 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) @@ -198,13 +197,11 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int result; /* allow to select class by setting skb->priority to valid classid; - * note that nfmark can be used too by attaching filter fw with no - * rules in it - */ + note that nfmark can be used too by attaching filter fw with no + rules in it */ if (skb->priority == sch->handle) return HTB_DIRECT; /* X:0 (direct flow) selected */ - cl = htb_find(skb->priority, sch); - if (cl && cl->level == 0) + if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0) return cl; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; @@ -219,12 +216,10 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, return NULL; } #endif - cl = (void *)res.class; - if (!cl) { + if ((cl = (void *)res.class) == NULL) { if (res.classid == sch->handle) return HTB_DIRECT; /* X:0 (direct flow) */ - cl = htb_find(res.classid, sch); - if (!cl) + if ((cl = htb_find(res.classid, sch)) == NULL) break; /* filter selected invalid classid */ } if (!cl->level) @@ -383,8 +378,7 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) if (p->un.inner.feed[prio].rb_node) /* parent already has its feed in use so that - * reset bit in mask as parent is already ok - */ + reset bit in mask as parent is already ok */ mask &= ~(1 << prio); htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio); @@ -419,9 +413,8 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) if (p->un.inner.ptr[prio] == cl->node + prio) { /* we are removing child which is pointed to from - * parent feed - forget the pointer but remember - * classid - */ + parent feed - forget the pointer but remember + classid */ p->un.inner.last_ptr_id[prio] = cl->common.classid; p->un.inner.ptr[prio] = NULL; } @@ -581,6 +574,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } sch->q.qlen++; + qdisc_bstats_update(sch, skb); return NET_XMIT_SUCCESS; } @@ -670,9 +664,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, unsigned long start) { /* 
don't run for longer than 2 jiffies; 2 is used instead of - * 1 to simplify things when jiffy is going to be incremented - * too soon - */ + 1 to simplify things when jiffy is going to be incremented + too soon */ unsigned long stop_at = start + 2; while (time_before(jiffies, stop_at)) { struct htb_class *cl; @@ -695,7 +688,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, /* too much load - let's continue after a break for scheduling */ if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { - pr_warning("htb: too many events!\n"); + printk(KERN_WARNING "htb: too many events!\n"); q->warned |= HTB_WARN_TOOMANYEVENTS; } @@ -703,8 +696,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, } /* Returns class->node+prio from id-tree where classe's id is >= id. NULL - * is no such one exists. - */ + is no such one exists. */ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n, u32 id) { @@ -748,14 +740,12 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio, for (i = 0; i < 65535; i++) { if (!*sp->pptr && *sp->pid) { /* ptr was invalidated but id is valid - try to recover - * the original or next ptr - */ + the original or next ptr */ *sp->pptr = htb_id_find_next_upper(prio, sp->root, *sp->pid); } *sp->pid = 0; /* ptr is valid now so that remove this hint as it - * can become out of date quickly - */ + can become out of date quickly */ if (!*sp->pptr) { /* we are at right end; rewind & go up */ *sp->pptr = sp->root; while ((*sp->pptr)->rb_left) @@ -783,8 +773,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio, } /* dequeues packet at given priority and level; call only if - * you are sure that there is active class at prio/level - */ + you are sure that there is active class at prio/level */ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, int level) { @@ -801,10 +790,9 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, return NULL; /* class can be empty - it is unlikely but can be true if leaf - * qdisc drops packets in enqueue routine or if someone used - * graft operation on the leaf since last dequeue; - * simply deactivate and skip such class - */ + qdisc drops packets in enqueue routine or if someone used + graft operation on the leaf since last dequeue; + simply deactivate and skip such class */ if (unlikely(cl->un.leaf.q->q.qlen == 0)) { struct htb_class *next; htb_deactivate(q, cl); @@ -844,8 +832,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, ptr[0]) + prio); } /* this used to be after charge_class but this constelation - * gives us slightly better performance - */ + gives us slightly better performance */ if (!cl->un.leaf.q->q.qlen) htb_deactivate(q, cl); htb_charge_class(q, cl, level, skb); @@ -855,7 +842,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, static struct sk_buff *htb_dequeue(struct Qdisc *sch) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; struct htb_sched *q = qdisc_priv(sch); int level; psched_time_t next_event; @@ -864,9 +851,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) /* try to dequeue direct packets as high prio (!) 
to minimize cpu work */ skb = __skb_dequeue(&q->direct_queue); if (skb != NULL) { -ok: - qdisc_bstats_update(sch, skb); - qdisc_unthrottled(sch); + sch->flags &= ~TCQ_F_THROTTLED; sch->q.qlen--; return skb; } @@ -897,11 +882,13 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) m = ~q->row_mask[level]; while (m != (int)(-1)) { int prio = ffz(m); - m |= 1 << prio; skb = htb_dequeue_tree(q, prio, level); - if (likely(skb != NULL)) - goto ok; + if (likely(skb != NULL)) { + sch->q.qlen--; + sch->flags &= ~TCQ_F_THROTTLED; + goto fin; + } } } sch->qstats.overlimits++; @@ -1002,12 +989,13 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) return err; if (tb[TCA_HTB_INIT] == NULL) { - pr_err("HTB: hey probably you have bad tc tool ?\n"); + printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); return -EINVAL; } gopt = nla_data(tb[TCA_HTB_INIT]); if (gopt->version != HTB_VER >> 16) { - pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n", + printk(KERN_ERR + "HTB: need tc/htb version %d (minor is %d), you have %d\n", HTB_VER >> 16, HTB_VER & 0xffff, gopt->version); return -EINVAL; } @@ -1220,10 +1208,9 @@ static void htb_destroy(struct Qdisc *sch) cancel_work_sync(&q->work); qdisc_watchdog_cancel(&q->watchdog); /* This line used to be after htb_destroy_class call below - * and surprisingly it worked in 2.4. But it must precede it - * because filter need its target class alive to be able to call - * unbind_filter on it (without Oops). - */ + and surprisingly it worked in 2.4. But it must precede it + because filter need its target class alive to be able to call + unbind_filter on it (without Oops). */ tcf_destroy_chain(&q->filter_list); for (i = 0; i < q->clhash.hashsize; i++) { @@ -1357,12 +1344,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, /* check maximal depth */ if (parent && parent->parent && parent->parent->level < 2) { - pr_err("htb: tree is too deep\n"); + printk(KERN_ERR "htb: tree is too deep\n"); goto failure; } err = -ENOBUFS; - cl = kzalloc(sizeof(*cl), GFP_KERNEL); - if (!cl) + if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) goto failure; err = gen_new_estimator(&cl->bstats, &cl->rate_est, @@ -1382,9 +1368,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, RB_CLEAR_NODE(&cl->node[prio]); /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) - * so that can't be used inside of sch_tree_lock - * -- thanks to Karlis Peisenieks - */ + so that can't be used inside of sch_tree_lock + -- thanks to Karlis Peisenieks */ new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); sch_tree_lock(sch); @@ -1436,18 +1421,17 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, } /* it used to be a nasty bug here, we have to check that node - * is really leaf before changing cl->un.leaf ! - */ + is really leaf before changing cl->un.leaf ! */ if (!cl->level) { cl->quantum = rtab->rate.rate / q->rate2quantum; if (!hopt->quantum && cl->quantum < 1000) { - pr_warning( + printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->common.classid); cl->quantum = 1000; } if (!hopt->quantum && cl->quantum > 200000) { - pr_warning( + printk(KERN_WARNING "HTB: quantum of class %X is big. 
Consider r2q change.\n", cl->common.classid); cl->quantum = 200000; @@ -1496,13 +1480,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, struct htb_class *cl = htb_find(classid, sch); /*if (cl && !cl->level) return 0; - * The line above used to be there to prevent attaching filters to - * leaves. But at least tc_index filter uses this just to get class - * for other reasons so that we have to allow for it. - * ---- - * 19.6.2002 As Werner explained it is ok - bind filter is just - * another way to "lock" the class - unlike "get" this lock can - * be broken by class during destroy IIUC. + The line above used to be there to prevent attaching filters to + leaves. But at least tc_index filter uses this just to get class + for other reasons so that we have to allow for it. + ---- + 19.6.2002 As Werner explained it is ok - bind filter is just + another way to "lock" the class - unlike "get" this lock can + be broken by class during destroy IIUC. */ if (cl) cl->filter_cnt++; diff --git a/trunk/net/sched/sch_mq.c b/trunk/net/sched/sch_mq.c index ec5cbc848963..ecc302f4d2a1 100644 --- a/trunk/net/sched/sch_mq.c +++ b/trunk/net/sched/sch_mq.c @@ -61,6 +61,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) TC_H_MIN(ntx + 1))); if (qdisc == NULL) goto err; + qdisc->flags |= TCQ_F_CAN_BYPASS; priv->qdiscs[ntx] = qdisc; } diff --git a/trunk/net/sched/sch_mqprio.c b/trunk/net/sched/sch_mqprio.c deleted file mode 100644 index effd4ee0e880..000000000000 --- a/trunk/net/sched/sch_mqprio.c +++ /dev/null @@ -1,416 +0,0 @@ -/* - * net/sched/sch_mqprio.c - * - * Copyright (c) 2010 John Fastabend - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct mqprio_sched { - struct Qdisc **qdiscs; - int hw_owned; -}; - -static void mqprio_destroy(struct Qdisc *sch) -{ - struct net_device *dev = qdisc_dev(sch); - struct mqprio_sched *priv = qdisc_priv(sch); - unsigned int ntx; - - if (!priv->qdiscs) - return; - - for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) - qdisc_destroy(priv->qdiscs[ntx]); - - if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc) - dev->netdev_ops->ndo_setup_tc(dev, 0); - else - netdev_set_num_tc(dev, 0); - - kfree(priv->qdiscs); -} - -static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt) -{ - int i, j; - - /* Verify num_tc is not out of max range */ - if (qopt->num_tc > TC_MAX_QUEUE) - return -EINVAL; - - /* Verify priority mapping uses valid tcs */ - for (i = 0; i < TC_BITMASK + 1; i++) { - if (qopt->prio_tc_map[i] >= qopt->num_tc) - return -EINVAL; - } - - /* net_device does not support requested operation */ - if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) - return -EINVAL; - - /* if hw owned qcount and qoffset are taken from LLD so - * no reason to verify them here - */ - if (qopt->hw) - return 0; - - for (i = 0; i < qopt->num_tc; i++) { - unsigned int last = qopt->offset[i] + qopt->count[i]; - - /* Verify the queue count is in tx range being equal to the - * real_num_tx_queues indicates the last queue is in use. 
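Editor's note: mqprio_parse_opt() in the removed sch_mqprio.c above checks that each traffic class maps to a non-empty queue range inside the device's real TX queues, and (in the lines just below) that later classes do not overlap earlier ones. A standalone sketch of those checks follows; tc_ranges and validate_tc_ranges() are illustrative names, not kernel types.

    /* Standalone sketch of the mqprio traffic-class range checks. */
    #include <stdio.h>

    #define MAX_TC 16

    struct tc_ranges {
        unsigned int num_tc;
        unsigned int offset[MAX_TC];
        unsigned int count[MAX_TC];
    };

    static int validate_tc_ranges(const struct tc_ranges *o,
                                  unsigned int real_num_tx_queues)
    {
        unsigned int i, j;

        for (i = 0; i < o->num_tc; i++) {
            unsigned int last = o->offset[i] + o->count[i];

            /* range must be non-empty and stay within the real queues */
            if (o->offset[i] >= real_num_tx_queues ||
                !o->count[i] || last > real_num_tx_queues)
                return -1;

            /* ranges of later TCs must not start inside this one */
            for (j = i + 1; j < o->num_tc; j++)
                if (last > o->offset[j])
                    return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct tc_ranges ok  = { 2, { 0, 4 }, { 4, 4 } };
        struct tc_ranges bad = { 2, { 0, 4 }, { 6, 4 } }; /* TC0 overlaps TC1 */

        printf("ok:  %d\n", validate_tc_ranges(&ok, 8));  /* 0 */
        printf("bad: %d\n", validate_tc_ranges(&bad, 8)); /* -1 */
        return 0;
    }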
- */ - if (qopt->offset[i] >= dev->real_num_tx_queues || - !qopt->count[i] || - last > dev->real_num_tx_queues) - return -EINVAL; - - /* Verify that the offset and counts do not overlap */ - for (j = i + 1; j < qopt->num_tc; j++) { - if (last > qopt->offset[j]) - return -EINVAL; - } - } - - return 0; -} - -static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) -{ - struct net_device *dev = qdisc_dev(sch); - struct mqprio_sched *priv = qdisc_priv(sch); - struct netdev_queue *dev_queue; - struct Qdisc *qdisc; - int i, err = -EOPNOTSUPP; - struct tc_mqprio_qopt *qopt = NULL; - - BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE); - BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK); - - if (sch->parent != TC_H_ROOT) - return -EOPNOTSUPP; - - if (!netif_is_multiqueue(dev)) - return -EOPNOTSUPP; - - if (nla_len(opt) < sizeof(*qopt)) - return -EINVAL; - - qopt = nla_data(opt); - if (mqprio_parse_opt(dev, qopt)) - return -EINVAL; - - /* pre-allocate qdisc, attachment can't fail */ - priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), - GFP_KERNEL); - if (priv->qdiscs == NULL) { - err = -ENOMEM; - goto err; - } - - for (i = 0; i < dev->num_tx_queues; i++) { - dev_queue = netdev_get_tx_queue(dev, i); - qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops, - TC_H_MAKE(TC_H_MAJ(sch->handle), - TC_H_MIN(i + 1))); - if (qdisc == NULL) { - err = -ENOMEM; - goto err; - } - priv->qdiscs[i] = qdisc; - } - - /* If the mqprio options indicate that hardware should own - * the queue mapping then run ndo_setup_tc otherwise use the - * supplied and verified mapping - */ - if (qopt->hw) { - priv->hw_owned = 1; - err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc); - if (err) - goto err; - } else { - netdev_set_num_tc(dev, qopt->num_tc); - for (i = 0; i < qopt->num_tc; i++) - netdev_set_tc_queue(dev, i, - qopt->count[i], qopt->offset[i]); - } - - /* Always use supplied priority mappings */ - for (i = 0; i < TC_BITMASK + 1; i++) - netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]); - - sch->flags |= TCQ_F_MQROOT; - return 0; - -err: - mqprio_destroy(sch); - return err; -} - -static void mqprio_attach(struct Qdisc *sch) -{ - struct net_device *dev = qdisc_dev(sch); - struct mqprio_sched *priv = qdisc_priv(sch); - struct Qdisc *qdisc; - unsigned int ntx; - - /* Attach underlying qdisc */ - for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { - qdisc = priv->qdiscs[ntx]; - qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); - if (qdisc) - qdisc_destroy(qdisc); - } - kfree(priv->qdiscs); - priv->qdiscs = NULL; -} - -static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch, - unsigned long cl) -{ - struct net_device *dev = qdisc_dev(sch); - unsigned long ntx = cl - 1 - netdev_get_num_tc(dev); - - if (ntx >= dev->num_tx_queues) - return NULL; - return netdev_get_tx_queue(dev, ntx); -} - -static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, - struct Qdisc **old) -{ - struct net_device *dev = qdisc_dev(sch); - struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); - - if (!dev_queue) - return -EINVAL; - - if (dev->flags & IFF_UP) - dev_deactivate(dev); - - *old = dev_graft_qdisc(dev_queue, new); - - if (dev->flags & IFF_UP) - dev_activate(dev); - - return 0; -} - -static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) -{ - struct net_device *dev = qdisc_dev(sch); - struct mqprio_sched *priv = qdisc_priv(sch); - unsigned char *b = skb_tail_pointer(skb); - struct tc_mqprio_qopt opt = { 0 }; - struct Qdisc *qdisc; - unsigned int i; - - sch->q.qlen = 0; - 
memset(&sch->bstats, 0, sizeof(sch->bstats)); - memset(&sch->qstats, 0, sizeof(sch->qstats)); - - for (i = 0; i < dev->num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; - spin_lock_bh(qdisc_lock(qdisc)); - sch->q.qlen += qdisc->q.qlen; - sch->bstats.bytes += qdisc->bstats.bytes; - sch->bstats.packets += qdisc->bstats.packets; - sch->qstats.qlen += qdisc->qstats.qlen; - sch->qstats.backlog += qdisc->qstats.backlog; - sch->qstats.drops += qdisc->qstats.drops; - sch->qstats.requeues += qdisc->qstats.requeues; - sch->qstats.overlimits += qdisc->qstats.overlimits; - spin_unlock_bh(qdisc_lock(qdisc)); - } - - opt.num_tc = netdev_get_num_tc(dev); - memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); - opt.hw = priv->hw_owned; - - for (i = 0; i < netdev_get_num_tc(dev); i++) { - opt.count[i] = dev->tc_to_txq[i].count; - opt.offset[i] = dev->tc_to_txq[i].offset; - } - - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); - - return skb->len; -nla_put_failure: - nlmsg_trim(skb, b); - return -1; -} - -static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl) -{ - struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); - - if (!dev_queue) - return NULL; - - return dev_queue->qdisc_sleeping; -} - -static unsigned long mqprio_get(struct Qdisc *sch, u32 classid) -{ - struct net_device *dev = qdisc_dev(sch); - unsigned int ntx = TC_H_MIN(classid); - - if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev)) - return 0; - return ntx; -} - -static void mqprio_put(struct Qdisc *sch, unsigned long cl) -{ -} - -static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl, - struct sk_buff *skb, struct tcmsg *tcm) -{ - struct net_device *dev = qdisc_dev(sch); - - if (cl <= netdev_get_num_tc(dev)) { - tcm->tcm_parent = TC_H_ROOT; - tcm->tcm_info = 0; - } else { - int i; - struct netdev_queue *dev_queue; - - dev_queue = mqprio_queue_get(sch, cl); - tcm->tcm_parent = 0; - for (i = 0; i < netdev_get_num_tc(dev); i++) { - struct netdev_tc_txq tc = dev->tc_to_txq[i]; - int q_idx = cl - netdev_get_num_tc(dev); - - if (q_idx > tc.offset && - q_idx <= tc.offset + tc.count) { - tcm->tcm_parent = - TC_H_MAKE(TC_H_MAJ(sch->handle), - TC_H_MIN(i + 1)); - break; - } - } - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; - } - tcm->tcm_handle |= TC_H_MIN(cl); - return 0; -} - -static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, - struct gnet_dump *d) -{ - struct net_device *dev = qdisc_dev(sch); - - if (cl <= netdev_get_num_tc(dev)) { - int i; - struct Qdisc *qdisc; - struct gnet_stats_queue qstats = {0}; - struct gnet_stats_basic_packed bstats = {0}; - struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1]; - - /* Drop lock here it will be reclaimed before touching - * statistics this is required because the d->lock we - * hold here is the look on dev_queue->qdisc_sleeping - * also acquired below. 
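Editor's note: mqprio_dump() above reports the root statistics as the sum of every per-TX-queue child qdisc's counters, read under each child's qdisc lock. The sketch below shows the aggregation step only, with locking omitted; qstats is an illustrative struct, not the kernel's gnet_stats types.

    /* Standalone sketch of summing per-queue counters into root stats. */
    #include <stdio.h>
    #include <stdint.h>

    struct qstats {
        uint64_t bytes;
        uint64_t packets;
        uint32_t drops;
    };

    static struct qstats sum_children(const struct qstats *child, unsigned int n)
    {
        struct qstats total = { 0, 0, 0 };
        unsigned int i;

        for (i = 0; i < n; i++) {
            total.bytes   += child[i].bytes;
            total.packets += child[i].packets;
            total.drops   += child[i].drops;
        }
        return total;
    }

    int main(void)
    {
        struct qstats child[2] = { { 1000, 10, 1 }, { 500, 5, 0 } };
        struct qstats root = sum_children(child, 2);

        printf("bytes=%llu packets=%llu drops=%u\n",
               (unsigned long long)root.bytes,
               (unsigned long long)root.packets,
               (unsigned)root.drops);
        return 0;
    }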
- */ - spin_unlock_bh(d->lock); - - for (i = tc.offset; i < tc.offset + tc.count; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; - spin_lock_bh(qdisc_lock(qdisc)); - bstats.bytes += qdisc->bstats.bytes; - bstats.packets += qdisc->bstats.packets; - qstats.qlen += qdisc->qstats.qlen; - qstats.backlog += qdisc->qstats.backlog; - qstats.drops += qdisc->qstats.drops; - qstats.requeues += qdisc->qstats.requeues; - qstats.overlimits += qdisc->qstats.overlimits; - spin_unlock_bh(qdisc_lock(qdisc)); - } - /* Reclaim root sleeping lock before completing stats */ - spin_lock_bh(d->lock); - if (gnet_stats_copy_basic(d, &bstats) < 0 || - gnet_stats_copy_queue(d, &qstats) < 0) - return -1; - } else { - struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); - - sch = dev_queue->qdisc_sleeping; - sch->qstats.qlen = sch->q.qlen; - if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || - gnet_stats_copy_queue(d, &sch->qstats) < 0) - return -1; - } - return 0; -} - -static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) -{ - struct net_device *dev = qdisc_dev(sch); - unsigned long ntx; - - if (arg->stop) - return; - - /* Walk hierarchy with a virtual class per tc */ - arg->count = arg->skip; - for (ntx = arg->skip; - ntx < dev->num_tx_queues + netdev_get_num_tc(dev); - ntx++) { - if (arg->fn(sch, ntx + 1, arg) < 0) { - arg->stop = 1; - break; - } - arg->count++; - } -} - -static const struct Qdisc_class_ops mqprio_class_ops = { - .graft = mqprio_graft, - .leaf = mqprio_leaf, - .get = mqprio_get, - .put = mqprio_put, - .walk = mqprio_walk, - .dump = mqprio_dump_class, - .dump_stats = mqprio_dump_class_stats, -}; - -struct Qdisc_ops mqprio_qdisc_ops __read_mostly = { - .cl_ops = &mqprio_class_ops, - .id = "mqprio", - .priv_size = sizeof(struct mqprio_sched), - .init = mqprio_init, - .destroy = mqprio_destroy, - .attach = mqprio_attach, - .dump = mqprio_dump, - .owner = THIS_MODULE, -}; - -static int __init mqprio_module_init(void) -{ - return register_qdisc(&mqprio_qdisc_ops); -} - -static void __exit mqprio_module_exit(void) -{ - unregister_qdisc(&mqprio_qdisc_ops); -} - -module_init(mqprio_module_init); -module_exit(mqprio_module_exit); - -MODULE_LICENSE("GPL"); diff --git a/trunk/net/sched/sch_multiq.c b/trunk/net/sched/sch_multiq.c index edc1950e0e77..21f13da24763 100644 --- a/trunk/net/sched/sch_multiq.c +++ b/trunk/net/sched/sch_multiq.c @@ -83,6 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(skb, qdisc); if (ret == NET_XMIT_SUCCESS) { + qdisc_bstats_update(sch, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; } @@ -111,7 +112,6 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch) qdisc = q->queues[q->curband]; skb = qdisc->dequeue(qdisc); if (skb) { - qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } @@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch) unsigned int len; struct Qdisc *qdisc; - for (band = q->bands - 1; band >= 0; band--) { + for (band = q->bands-1; band >= 0; band--) { qdisc = q->queues[band]; if (qdisc->ops->drop) { len = qdisc->ops->drop(qdisc); @@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt) for (i = 0; i < q->max_bands; i++) q->queues[i] = &noop_qdisc; - err = multiq_tune(sch, opt); + err = multiq_tune(sch,opt); if (err) kfree(q->queues); @@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, struct multiq_sched_data *q = qdisc_priv(sch); tcm->tcm_handle |= TC_H_MIN(cl); - tcm->tcm_info = q->queues[cl - 1]->handle; + tcm->tcm_info = 
q->queues[cl-1]->handle; return 0; } @@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) arg->count++; continue; } - if (arg->fn(sch, band + 1, arg) < 0) { + if (arg->fn(sch, band+1, arg) < 0) { arg->stop = 1; break; } diff --git a/trunk/net/sched/sch_netem.c b/trunk/net/sched/sch_netem.c index 64f0d3293b49..1c4bce863479 100644 --- a/trunk/net/sched/sch_netem.c +++ b/trunk/net/sched/sch_netem.c @@ -211,8 +211,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } cb = netem_skb_cb(skb); - if (q->gap == 0 || /* not doing reordering */ - q->counter < q->gap || /* inside last reordering gap */ + if (q->gap == 0 || /* not doing reordering */ + q->counter < q->gap || /* inside last reordering gap */ q->reorder < get_crandom(&q->reorder_cor)) { psched_time_t now; psched_tdiff_t delay; @@ -240,6 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (likely(ret == NET_XMIT_SUCCESS)) { sch->q.qlen++; + qdisc_bstats_update(sch, skb); } else if (net_xmit_drop_count(ret)) { sch->qstats.drops++; } @@ -248,7 +249,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) return ret; } -static unsigned int netem_drop(struct Qdisc *sch) +static unsigned int netem_drop(struct Qdisc* sch) { struct netem_sched_data *q = qdisc_priv(sch); unsigned int len = 0; @@ -265,7 +266,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) struct netem_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; - if (qdisc_is_throttled(sch)) + if (sch->flags & TCQ_F_THROTTLED) return NULL; skb = q->qdisc->ops->peek(q->qdisc); @@ -288,7 +289,6 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) skb->tstamp.tv64 = 0; #endif pr_debug("netem_dequeue: return skb=%p\n", skb); - qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } @@ -476,6 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) __skb_queue_after(list, skb, nskb); sch->qstats.backlog += qdisc_pkt_len(nskb); + qdisc_bstats_update(sch, nskb); return NET_XMIT_SUCCESS; } diff --git a/trunk/net/sched/sch_prio.c b/trunk/net/sched/sch_prio.c index 2a318f2dc3e5..966158d49dd1 100644 --- a/trunk/net/sched/sch_prio.c +++ b/trunk/net/sched/sch_prio.c @@ -22,7 +22,8 @@ #include -struct prio_sched_data { +struct prio_sched_data +{ int bands; struct tcf_proto *filter_list; u8 prio2band[TC_PRIO_MAX+1]; @@ -53,7 +54,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) if (!q->filter_list || err < 0) { if (TC_H_MAJ(band)) band = 0; - return q->queues[q->prio2band[band & TC_PRIO_MAX]]; + return q->queues[q->prio2band[band&TC_PRIO_MAX]]; } band = res.classid; } @@ -83,6 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(skb, qdisc); if (ret == NET_XMIT_SUCCESS) { + qdisc_bstats_update(sch, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; } @@ -105,7 +107,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch) return NULL; } -static struct sk_buff *prio_dequeue(struct Qdisc *sch) +static struct sk_buff *prio_dequeue(struct Qdisc* sch) { struct prio_sched_data *q = qdisc_priv(sch); int prio; @@ -114,7 +116,6 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch) struct Qdisc *qdisc = q->queues[prio]; struct sk_buff *skb = qdisc->dequeue(qdisc); if (skb) { - qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } @@ -123,7 +124,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch) } -static unsigned int prio_drop(struct Qdisc *sch) +static unsigned int prio_drop(struct Qdisc* sch) { struct prio_sched_data 
*q = qdisc_priv(sch); int prio; @@ -142,24 +143,24 @@ static unsigned int prio_drop(struct Qdisc *sch) static void -prio_reset(struct Qdisc *sch) +prio_reset(struct Qdisc* sch) { int prio; struct prio_sched_data *q = qdisc_priv(sch); - for (prio = 0; prio < q->bands; prio++) + for (prio=0; prio<q->bands; prio++) qdisc_reset(q->queues[prio]); sch->q.qlen = 0; } static void -prio_destroy(struct Qdisc *sch) +prio_destroy(struct Qdisc* sch) { int prio; struct prio_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); - for (prio = 0; prio < q->bands; prio++) + for (prio=0; prio<q->bands; prio++) qdisc_destroy(q->queues[prio]); } @@ -176,7 +177,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) return -EINVAL; - for (i = 0; i <= TC_PRIO_MAX; i++) { + for (i=0; i<=TC_PRIO_MAX; i++) { if (qopt->priomap[i] >= qopt->bands) return -EINVAL; } @@ -185,7 +186,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) q->bands = qopt->bands; memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); - for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { + for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { struct Qdisc *child = q->queues[i]; q->queues[i] = &noop_qdisc; if (child != &noop_qdisc) { @@ -195,10 +196,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) } sch_tree_unlock(sch); - for (i = 0; i < q->bands; i++) { + for (i=0; i<q->bands; i++) { if (q->queues[i] == &noop_qdisc) { struct Qdisc *child, *old; - child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, TC_H_MAKE(sch->handle, i + 1)); @@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt) struct prio_sched_data *q = qdisc_priv(sch); int i; - for (i = 0; i < TCQ_PRIO_BANDS; i++) + for (i=0; i<TCQ_PRIO_BANDS; i++) q->queues[i] = &noop_qdisc; if (opt == NULL) { @@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt) } else { int err; - if ((err = prio_tune(sch, opt)) != 0) + if ((err= prio_tune(sch, opt)) != 0) return err; } return 0; @@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) struct tc_prio_qopt opt; opt.bands = q->bands; - memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); + memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); @@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) arg->count++; continue; } - if (arg->fn(sch, prio + 1, arg) < 0) { + if (arg->fn(sch, prio+1, arg) < 0) { arg->stop = 1; break; } @@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) } } -static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl) { struct prio_sched_data *q = qdisc_priv(sch); diff --git a/trunk/net/sched/sch_red.c b/trunk/net/sched/sch_red.c index 6649463da1b6..a6009c5a2c97 100644 --- a/trunk/net/sched/sch_red.c +++ b/trunk/net/sched/sch_red.c @@ -36,7 +36,8 @@ if RED works correctly.
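Editor's note: the sch_red.c hunks that follow handle the three outcomes of red_action() (don't mark, probabilistic mark, hard mark), attempting ECN marking before falling back to a congestion drop. The sketch below shows only the textbook thresholding behind those verdicts; the kernel's red_action() additionally maintains the EWMA queue average and a count-based marking probability, so this is a simplified stand-in, not the kernel algorithm.

    /* Textbook RED thresholding sketch (simplified, illustrative only). */
    #include <stdio.h>
    #include <stdlib.h>

    enum red_verdict { DONT_MARK, PROB_MARK, HARD_MARK };

    static enum red_verdict red_decide(double qavg, double qth_min,
                                       double qth_max, double max_p)
    {
        double p;

        if (qavg < qth_min)
            return DONT_MARK; /* average queue short: accept as-is */
        if (qavg >= qth_max)
            return HARD_MARK; /* average queue long: mark or drop */

        /* in between: mark with probability growing towards max_p */
        p = max_p * (qavg - qth_min) / (qth_max - qth_min);
        return ((double)rand() / RAND_MAX) < p ? PROB_MARK : DONT_MARK;
    }

    int main(void)
    {
        printf("%d\n", red_decide(2.0, 5.0, 15.0, 0.02));  /* 0: don't mark */
        printf("%d\n", red_decide(10.0, 5.0, 15.0, 1.0));  /* 1 about half the time */
        printf("%d\n", red_decide(20.0, 5.0, 15.0, 0.02)); /* 2: hard mark */
        return 0;
    }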
*/ -struct red_sched_data { +struct red_sched_data +{ u32 limit; /* HARD maximal queue length */ unsigned char flags; struct red_parms parms; @@ -54,7 +55,7 @@ static inline int red_use_harddrop(struct red_sched_data *q) return q->flags & TC_RED_HARDDROP; } -static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; @@ -66,33 +67,34 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) red_end_of_idle_period(&q->parms); switch (red_action(&q->parms, q->parms.qavg)) { - case RED_DONT_MARK: - break; - - case RED_PROB_MARK: - sch->qstats.overlimits++; - if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { - q->stats.prob_drop++; - goto congestion_drop; - } - - q->stats.prob_mark++; - break; - - case RED_HARD_MARK: - sch->qstats.overlimits++; - if (red_use_harddrop(q) || !red_use_ecn(q) || - !INET_ECN_set_ce(skb)) { - q->stats.forced_drop++; - goto congestion_drop; - } - - q->stats.forced_mark++; - break; + case RED_DONT_MARK: + break; + + case RED_PROB_MARK: + sch->qstats.overlimits++; + if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { + q->stats.prob_drop++; + goto congestion_drop; + } + + q->stats.prob_mark++; + break; + + case RED_HARD_MARK: + sch->qstats.overlimits++; + if (red_use_harddrop(q) || !red_use_ecn(q) || + !INET_ECN_set_ce(skb)) { + q->stats.forced_drop++; + goto congestion_drop; + } + + q->stats.forced_mark++; + break; } ret = qdisc_enqueue(skb, child); if (likely(ret == NET_XMIT_SUCCESS)) { + qdisc_bstats_update(sch, skb); sch->q.qlen++; } else if (net_xmit_drop_count(ret)) { q->stats.pdrop++; @@ -105,24 +107,22 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_CN; } -static struct sk_buff *red_dequeue(struct Qdisc *sch) +static struct sk_buff * red_dequeue(struct Qdisc* sch) { struct sk_buff *skb; struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; skb = child->dequeue(child); - if (skb) { - qdisc_bstats_update(sch, skb); + if (skb) sch->q.qlen--; - } else { - if (!red_is_idling(&q->parms)) - red_start_of_idle_period(&q->parms); - } + else if (!red_is_idling(&q->parms)) + red_start_of_idle_period(&q->parms); + return skb; } -static struct sk_buff *red_peek(struct Qdisc *sch) +static struct sk_buff * red_peek(struct Qdisc* sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; @@ -130,7 +130,7 @@ static struct sk_buff *red_peek(struct Qdisc *sch) return child->ops->peek(child); } -static unsigned int red_drop(struct Qdisc *sch) +static unsigned int red_drop(struct Qdisc* sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; @@ -149,7 +149,7 @@ static unsigned int red_drop(struct Qdisc *sch) return 0; } -static void red_reset(struct Qdisc *sch) +static void red_reset(struct Qdisc* sch) { struct red_sched_data *q = qdisc_priv(sch); @@ -216,7 +216,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) return 0; } -static int red_init(struct Qdisc *sch, struct nlattr *opt) +static int red_init(struct Qdisc* sch, struct nlattr *opt) { struct red_sched_data *q = qdisc_priv(sch); diff --git a/trunk/net/sched/sch_sfq.c b/trunk/net/sched/sch_sfq.c index 4cff44235773..239ec53a634d 100644 --- a/trunk/net/sched/sch_sfq.c +++ b/trunk/net/sched/sch_sfq.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -77,8 +76,7 @@ #define SFQ_DEPTH 128 /* max number of packets per flow */ #define 
SFQ_SLOTS 128 /* max number of flows */ #define SFQ_EMPTY_SLOT 255 -#define SFQ_DEFAULT_HASH_DIVISOR 1024 - +#define SFQ_HASH_DIVISOR 1024 /* We use 16 bits to store allot, and want to handle packets up to 64K * Scale allot by 8 (1<<3) so that no overflow occurs. */ @@ -94,7 +92,8 @@ typedef unsigned char sfq_index; * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1] * are 'pointers' to dep[] array */ -struct sfq_head { +struct sfq_head +{ sfq_index next; sfq_index prev; }; @@ -109,12 +108,13 @@ struct sfq_slot { short allot; /* credit for this slot */ }; -struct sfq_sched_data { +struct sfq_sched_data +{ /* Parameters */ int perturb_period; - unsigned int quantum; /* Allotment per round: MUST BE >= MTU */ + unsigned quantum; /* Allotment per round: MUST BE >= MTU */ int limit; - unsigned int divisor; /* number of slots in hash table */ + /* Variables */ struct tcf_proto *filter_list; struct timer_list perturb_timer; @@ -122,7 +122,7 @@ struct sfq_sched_data { sfq_index cur_depth; /* depth of longest slot */ unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ struct sfq_slot *tail; /* current slot in round */ - sfq_index *ht; /* Hash table (divisor slots) */ + sfq_index ht[SFQ_HASH_DIVISOR]; /* Hash table */ struct sfq_slot slots[SFQ_SLOTS]; struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */ }; @@ -137,12 +137,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index return &q->dep[val - SFQ_SLOTS]; } -static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) +static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) { - return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1); + return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); } -static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) +static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) { u32 h, h2; @@ -157,13 +157,13 @@ static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) iph = ip_hdr(skb); h = (__force u32)iph->daddr; h2 = (__force u32)iph->saddr ^ iph->protocol; - if (iph->frag_off & htons(IP_MF | IP_OFFSET)) + if (iph->frag_off & htons(IP_MF|IP_OFFSET)) break; poff = proto_ports_offset(iph->protocol); if (poff >= 0 && pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) { iph = ip_hdr(skb); - h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff); + h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff); } break; } @@ -181,7 +181,7 @@ static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) if (poff >= 0 && pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) { iph = ipv6_hdr(skb); - h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff); + h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff); } break; } @@ -203,7 +203,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, if (TC_H_MAJ(skb->priority) == sch->handle && TC_H_MIN(skb->priority) > 0 && - TC_H_MIN(skb->priority) <= q->divisor) + TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR) return TC_H_MIN(skb->priority); if (!q->filter_list) @@ -221,7 +221,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, return 0; } #endif - if (TC_H_MIN(res.classid) <= q->divisor) + if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR) return TC_H_MIN(res.classid); } return 0; @@ -402,8 +402,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) q->tail = slot; slot->allot = q->scaled_quantum; } - if (++sch->q.qlen <= q->limit) + if (++sch->q.qlen <= 
q->limit) { + qdisc_bstats_update(sch, skb); return NET_XMIT_SUCCESS; + } sfq_drop(sch); return NET_XMIT_CN; @@ -443,7 +445,6 @@ sfq_dequeue(struct Qdisc *sch) } skb = slot_dequeue_head(slot); sfq_dec(q, a); - qdisc_bstats_update(sch, skb); sch->q.qlen--; sch->qstats.backlog -= qdisc_pkt_len(skb); @@ -497,11 +498,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) q->perturb_period = ctl->perturb_period * HZ; if (ctl->limit) q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1); - if (ctl->divisor) { - if (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536) - return -EINVAL; - q->divisor = ctl->divisor; - } + qlen = sch->q.qlen; while (sch->q.qlen > q->limit) sfq_drop(sch); @@ -519,13 +516,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) static int sfq_init(struct Qdisc *sch, struct nlattr *opt) { struct sfq_sched_data *q = qdisc_priv(sch); - size_t sz; int i; q->perturb_timer.function = sfq_perturbation; q->perturb_timer.data = (unsigned long)sch; init_timer_deferrable(&q->perturb_timer); + for (i = 0; i < SFQ_HASH_DIVISOR; i++) + q->ht[i] = SFQ_EMPTY_SLOT; + for (i = 0; i < SFQ_DEPTH; i++) { q->dep[i].next = i + SFQ_SLOTS; q->dep[i].prev = i + SFQ_SLOTS; @@ -534,7 +533,6 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) q->limit = SFQ_DEPTH - 1; q->cur_depth = 0; q->tail = NULL; - q->divisor = SFQ_DEFAULT_HASH_DIVISOR; if (opt == NULL) { q->quantum = psched_mtu(qdisc_dev(sch)); q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); @@ -546,23 +544,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) return err; } - sz = sizeof(q->ht[0]) * q->divisor; - q->ht = kmalloc(sz, GFP_KERNEL); - if (!q->ht && sz > PAGE_SIZE) - q->ht = vmalloc(sz); - if (!q->ht) - return -ENOMEM; - for (i = 0; i < q->divisor; i++) - q->ht[i] = SFQ_EMPTY_SLOT; - for (i = 0; i < SFQ_SLOTS; i++) { slot_queue_init(&q->slots[i]); sfq_link(q, i); } - if (q->limit >= 1) - sch->flags |= TCQ_F_CAN_BYPASS; - else - sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; } @@ -573,10 +558,6 @@ static void sfq_destroy(struct Qdisc *sch) tcf_destroy_chain(&q->filter_list); q->perturb_period = 0; del_timer_sync(&q->perturb_timer); - if (is_vmalloc_addr(q->ht)) - vfree(q->ht); - else - kfree(q->ht); } static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) @@ -589,7 +570,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) opt.perturb_period = q->perturb_period / HZ; opt.limit = q->limit; - opt.divisor = q->divisor; + opt.divisor = SFQ_HASH_DIVISOR; opt.flows = q->limit; NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); @@ -614,8 +595,6 @@ static unsigned long sfq_get(struct Qdisc *sch, u32 classid) static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { - /* we cannot bypass queue discipline anymore */ - sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; } @@ -669,7 +648,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) if (arg->stop) return; - for (i = 0; i < q->divisor; i++) { + for (i = 0; i < SFQ_HASH_DIVISOR; i++) { if (q->ht[i] == SFQ_EMPTY_SLOT || arg->count < arg->skip) { arg->count++; diff --git a/trunk/net/sched/sch_tbf.c b/trunk/net/sched/sch_tbf.c index 1dcfb5223a86..77565e721811 100644 --- a/trunk/net/sched/sch_tbf.c +++ b/trunk/net/sched/sch_tbf.c @@ -97,7 +97,8 @@ changed the limit is not effective anymore. 
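Editor's note: the sch_tbf.c comment above and the tbf_dequeue() hunk below implement a classic token bucket: tokens accumulate at the configured rate up to the bucket depth, and a packet may leave only when enough tokens are available, otherwise the qdisc throttles. The kernel accounts tokens in scheduler time units derived from rate tables (the L2T/L2T_P macros); the standalone sketch below keeps the bucket in plain bytes for clarity and is not the kernel implementation.

    /* Standalone byte-based token-bucket sketch (illustrative only). */
    #include <stdio.h>

    struct tbf {
        double rate;   /* bytes per second */
        double burst;  /* bucket depth in bytes */
        double tokens; /* currently available bytes */
        double t_last; /* time of last update, seconds */
    };

    /* Return 1 if a packet of "len" bytes may be sent at time "now",
     * consuming tokens; 0 if it must wait for the bucket to refill. */
    static int tbf_may_send(struct tbf *q, double now, unsigned int len)
    {
        q->tokens += (now - q->t_last) * q->rate;
        if (q->tokens > q->burst)
            q->tokens = q->burst; /* bucket never exceeds its depth */
        q->t_last = now;

        if (q->tokens < len)
            return 0; /* throttle: the qdisc would arm its watchdog */
        q->tokens -= len;
        return 1;
    }

    int main(void)
    {
        struct tbf q = { 125000.0, 3000.0, 3000.0, 0.0 }; /* 1 Mbit/s, 3 KB burst */

        printf("%d\n", tbf_may_send(&q, 0.00, 1500)); /* 1 */
        printf("%d\n", tbf_may_send(&q, 0.00, 1500)); /* 1 */
        printf("%d\n", tbf_may_send(&q, 0.00, 1500)); /* 0: bucket empty */
        printf("%d\n", tbf_may_send(&q, 0.02, 1500)); /* 1: 2500 bytes refilled */
        return 0;
    }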
*/ -struct tbf_sched_data { +struct tbf_sched_data +{ /* Parameters */ u32 limit; /* Maximal length of backlog: bytes */ u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ @@ -114,10 +115,10 @@ struct tbf_sched_data { struct qdisc_watchdog watchdog; /* Watchdog timer */ }; -#define L2T(q, L) qdisc_l2t((q)->R_tab, L) -#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L) +#define L2T(q,L) qdisc_l2t((q)->R_tab,L) +#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L) -static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); int ret; @@ -133,10 +134,11 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) } sch->q.qlen++; + qdisc_bstats_update(sch, skb); return NET_XMIT_SUCCESS; } -static unsigned int tbf_drop(struct Qdisc *sch) +static unsigned int tbf_drop(struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); unsigned int len = 0; @@ -148,7 +150,7 @@ static unsigned int tbf_drop(struct Qdisc *sch) return len; } -static struct sk_buff *tbf_dequeue(struct Qdisc *sch) +static struct sk_buff *tbf_dequeue(struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; @@ -184,8 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) q->tokens = toks; q->ptokens = ptoks; sch->q.qlen--; - qdisc_unthrottled(sch); - qdisc_bstats_update(sch, skb); + sch->flags &= ~TCQ_F_THROTTLED; return skb; } @@ -208,7 +209,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) return NULL; } -static void tbf_reset(struct Qdisc *sch) +static void tbf_reset(struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -226,7 +227,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { [TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, }; -static int tbf_change(struct Qdisc *sch, struct nlattr *opt) +static int tbf_change(struct Qdisc* sch, struct nlattr *opt) { int err; struct tbf_sched_data *q = qdisc_priv(sch); @@ -235,7 +236,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) struct qdisc_rate_table *rtab = NULL; struct qdisc_rate_table *ptab = NULL; struct Qdisc *child = NULL; - int max_size, n; + int max_size,n; err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy); if (err < 0) @@ -258,18 +259,15 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) } for (n = 0; n < 256; n++) - if (rtab->data[n] > qopt->buffer) - break; - max_size = (n << qopt->rate.cell_log) - 1; + if (rtab->data[n] > qopt->buffer) break; + max_size = (n << qopt->rate.cell_log)-1; if (ptab) { int size; for (n = 0; n < 256; n++) - if (ptab->data[n] > qopt->mtu) - break; - size = (n << qopt->peakrate.cell_log) - 1; - if (size < max_size) - max_size = size; + if (ptab->data[n] > qopt->mtu) break; + size = (n << qopt->peakrate.cell_log)-1; + if (size < max_size) max_size = size; } if (max_size < 0) goto done; @@ -312,7 +310,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) return err; } -static int tbf_init(struct Qdisc *sch, struct nlattr *opt) +static int tbf_init(struct Qdisc* sch, struct nlattr *opt) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -424,7 +422,8 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static const struct Qdisc_class_ops tbf_class_ops = { +static const struct Qdisc_class_ops tbf_class_ops = +{ .graft = tbf_graft, .leaf = tbf_leaf, .get = tbf_get, diff --git a/trunk/net/sched/sch_teql.c b/trunk/net/sched/sch_teql.c index 45cd30098e34..84ce48eadff4 100644 --- 
a/trunk/net/sched/sch_teql.c +++ b/trunk/net/sched/sch_teql.c @@ -53,7 +53,8 @@ which will not break load balancing, though native slave traffic will have the highest priority. */ -struct teql_master { +struct teql_master +{ struct Qdisc_ops qops; struct net_device *dev; struct Qdisc *slaves; @@ -64,27 +65,29 @@ struct teql_master { unsigned long tx_dropped; }; -struct teql_sched_data { +struct teql_sched_data +{ struct Qdisc *next; struct teql_master *m; struct neighbour *ncache; struct sk_buff_head q; }; -#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next) +#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next) -#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT) +#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT) /* "teql*" qdisc routines */ static int -teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) +teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) { struct net_device *dev = qdisc_dev(sch); struct teql_sched_data *q = qdisc_priv(sch); if (q->q.qlen < dev->tx_queue_len) { __skb_queue_tail(&q->q, skb); + qdisc_bstats_update(sch, skb); return NET_XMIT_SUCCESS; } @@ -94,7 +97,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) } static struct sk_buff * -teql_dequeue(struct Qdisc *sch) +teql_dequeue(struct Qdisc* sch) { struct teql_sched_data *dat = qdisc_priv(sch); struct netdev_queue *dat_queue; @@ -108,21 +111,19 @@ teql_dequeue(struct Qdisc *sch) dat->m->slaves = sch; netif_wake_queue(m); } - } else { - qdisc_bstats_update(sch, skb); } sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen; return skb; } static struct sk_buff * -teql_peek(struct Qdisc *sch) +teql_peek(struct Qdisc* sch) { /* teql is meant to be used as root qdisc */ return NULL; } -static inline void +static __inline__ void teql_neigh_release(struct neighbour *n) { if (n) @@ -130,7 +131,7 @@ teql_neigh_release(struct neighbour *n) } static void -teql_reset(struct Qdisc *sch) +teql_reset(struct Qdisc* sch) { struct teql_sched_data *dat = qdisc_priv(sch); @@ -140,14 +141,13 @@ teql_reset(struct Qdisc *sch) } static void -teql_destroy(struct Qdisc *sch) +teql_destroy(struct Qdisc* sch) { struct Qdisc *q, *prev; struct teql_sched_data *dat = qdisc_priv(sch); struct teql_master *master = dat->m; - prev = master->slaves; - if (prev) { + if ((prev = master->slaves) != NULL) { do { q = NEXT_SLAVE(prev); if (q == sch) { @@ -179,7 +179,7 @@ teql_destroy(struct Qdisc *sch) static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) { struct net_device *dev = qdisc_dev(sch); - struct teql_master *m = (struct teql_master *)sch->ops; + struct teql_master *m = (struct teql_master*)sch->ops; struct teql_sched_data *q = qdisc_priv(sch); if (dev->hard_header_len > m->dev->hard_header_len) @@ -290,8 +290,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) nores = 0; busy = 0; - q = start; - if (!q) + if ((q = start) == NULL) goto drop; do { @@ -356,10 +355,10 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) static int teql_master_open(struct net_device *dev) { - struct Qdisc *q; + struct Qdisc * q; struct teql_master *m = netdev_priv(dev); int mtu = 0xFFFE; - unsigned int flags = IFF_NOARP | IFF_MULTICAST; + unsigned flags = IFF_NOARP|IFF_MULTICAST; if (m->slaves == NULL) return -EUNATCH; @@ -427,7 +426,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) do { if (new_mtu > qdisc_dev(q)->mtu) return -EINVAL; - } while ((q = NEXT_SLAVE(q)) != m->slaves); + } while ((q=NEXT_SLAVE(q)) != m->slaves); } dev->mtu = 
new_mtu; diff --git a/trunk/net/unix/af_unix.c b/trunk/net/unix/af_unix.c index d8d98d5b508c..dd419d286204 100644 --- a/trunk/net/unix/af_unix.c +++ b/trunk/net/unix/af_unix.c @@ -1475,12 +1475,6 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, goto out_free; } - if (sk_filter(other, skb) < 0) { - /* Toss the packet but do not return any error to the sender */ - err = len; - goto out_free; - } - unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) @@ -1984,38 +1978,36 @@ static int unix_shutdown(struct socket *sock, int mode) mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN); - if (!mode) - return 0; - - unix_state_lock(sk); - sk->sk_shutdown |= mode; - other = unix_peer(sk); - if (other) - sock_hold(other); - unix_state_unlock(sk); - sk->sk_state_change(sk); - - if (other && - (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { - - int peer_mode = 0; - - if (mode&RCV_SHUTDOWN) - peer_mode |= SEND_SHUTDOWN; - if (mode&SEND_SHUTDOWN) - peer_mode |= RCV_SHUTDOWN; - unix_state_lock(other); - other->sk_shutdown |= peer_mode; - unix_state_unlock(other); - other->sk_state_change(other); - if (peer_mode == SHUTDOWN_MASK) - sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); - else if (peer_mode & RCV_SHUTDOWN) - sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); + if (mode) { + unix_state_lock(sk); + sk->sk_shutdown |= mode; + other = unix_peer(sk); + if (other) + sock_hold(other); + unix_state_unlock(sk); + sk->sk_state_change(sk); + + if (other && + (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { + + int peer_mode = 0; + + if (mode&RCV_SHUTDOWN) + peer_mode |= SEND_SHUTDOWN; + if (mode&SEND_SHUTDOWN) + peer_mode |= RCV_SHUTDOWN; + unix_state_lock(other); + other->sk_shutdown |= peer_mode; + unix_state_unlock(other); + other->sk_state_change(other); + if (peer_mode == SHUTDOWN_MASK) + sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); + else if (peer_mode & RCV_SHUTDOWN) + sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); + } + if (other) + sock_put(other); } - if (other) - sock_put(other); - return 0; } diff --git a/trunk/net/wanrouter/wanmain.c b/trunk/net/wanrouter/wanmain.c index 788a12c1eb5d..74944a2dd436 100644 --- a/trunk/net/wanrouter/wanmain.c +++ b/trunk/net/wanrouter/wanmain.c @@ -59,6 +59,8 @@ #include /* copy_to/from_user */ #include /* __initfunc et al. 
*/ +#define KMEM_SAFETYZONE 8 + #define DEV_TO_SLAVE(dev) (*((struct net_device **)netdev_priv(dev))) /* diff --git a/trunk/net/wireless/reg.c b/trunk/net/wireless/reg.c index c565689f0b9f..37693b6ef23a 100644 --- a/trunk/net/wireless/reg.c +++ b/trunk/net/wireless/reg.c @@ -1801,9 +1801,9 @@ void regulatory_hint_disconnect(void) static bool freq_is_chan_12_13_14(u16 freq) { - if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) || - freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) || - freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ)) + if (freq == ieee80211_channel_to_frequency(12) || + freq == ieee80211_channel_to_frequency(13) || + freq == ieee80211_channel_to_frequency(14)) return true; return false; } diff --git a/trunk/net/wireless/util.c b/trunk/net/wireless/util.c index 4ed065d8bb51..7620ae2fcf18 100644 --- a/trunk/net/wireless/util.c +++ b/trunk/net/wireless/util.c @@ -29,37 +29,29 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, } EXPORT_SYMBOL(ieee80211_get_response_rate); -int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band) +int ieee80211_channel_to_frequency(int chan) { - /* see 802.11 17.3.8.3.2 and Annex J - * there are overlapping channel numbers in 5GHz and 2GHz bands */ - if (band == IEEE80211_BAND_5GHZ) { - if (chan >= 182 && chan <= 196) - return 4000 + chan * 5; - else - return 5000 + chan * 5; - } else { /* IEEE80211_BAND_2GHZ */ - if (chan == 14) - return 2484; - else if (chan < 14) - return 2407 + chan * 5; - else - return 0; /* not supported */ - } + if (chan < 14) + return 2407 + chan * 5; + + if (chan == 14) + return 2484; + + /* FIXME: 802.11j 17.3.8.3.2 */ + return (chan + 1000) * 5; } EXPORT_SYMBOL(ieee80211_channel_to_frequency); int ieee80211_frequency_to_channel(int freq) { - /* see 802.11 17.3.8.3.2 and Annex J */ if (freq == 2484) return 14; - else if (freq < 2484) + + if (freq < 2484) return (freq - 2407) / 5; - else if (freq >= 4910 && freq <= 4980) - return (freq - 4000) / 5; - else - return (freq - 5000) / 5; + + /* FIXME: 802.11j 17.3.8.3.2 */ + return freq/5 - 1000; } EXPORT_SYMBOL(ieee80211_frequency_to_channel); diff --git a/trunk/net/wireless/wext-compat.c b/trunk/net/wireless/wext-compat.c index 7f1f4ec49041..3e5dbd4e4cd5 100644 --- a/trunk/net/wireless/wext-compat.c +++ b/trunk/net/wireless/wext-compat.c @@ -267,12 +267,9 @@ int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq) * -EINVAL for impossible things. 
*/ if (freq->e == 0) { - enum ieee80211_band band = IEEE80211_BAND_2GHZ; if (freq->m < 0) return 0; - if (freq->m > 14) - band = IEEE80211_BAND_5GHZ; - return ieee80211_channel_to_frequency(freq->m, band); + return ieee80211_channel_to_frequency(freq->m); } else { int i, div = 1000000; for (i = 0; i < freq->e; i++) diff --git a/trunk/security/keys/Makefile b/trunk/security/keys/Makefile index 1bf090a885fe..6c941050f573 100644 --- a/trunk/security/keys/Makefile +++ b/trunk/security/keys/Makefile @@ -13,8 +13,8 @@ obj-y := \ request_key_auth.o \ user_defined.o -obj-$(CONFIG_TRUSTED_KEYS) += trusted.o -obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o +obj-$(CONFIG_TRUSTED_KEYS) += trusted_defined.o +obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted_defined.o obj-$(CONFIG_KEYS_COMPAT) += compat.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSCTL) += sysctl.o diff --git a/trunk/security/keys/encrypted.c b/trunk/security/keys/encrypted_defined.c similarity index 99% rename from trunk/security/keys/encrypted.c rename to trunk/security/keys/encrypted_defined.c index 9e7e4ce3fae8..28791a65740e 100644 --- a/trunk/security/keys/encrypted.c +++ b/trunk/security/keys/encrypted_defined.c @@ -30,7 +30,7 @@ #include #include -#include "encrypted.h" +#include "encrypted_defined.h" static const char KEY_TRUSTED_PREFIX[] = "trusted:"; static const char KEY_USER_PREFIX[] = "user:"; @@ -888,7 +888,6 @@ static int __init init_encrypted(void) out: encrypted_shash_release(); return ret; - } static void __exit cleanup_encrypted(void) diff --git a/trunk/security/keys/encrypted.h b/trunk/security/keys/encrypted_defined.h similarity index 100% rename from trunk/security/keys/encrypted.h rename to trunk/security/keys/encrypted_defined.h diff --git a/trunk/security/keys/trusted.c b/trunk/security/keys/trusted_defined.c similarity index 99% rename from trunk/security/keys/trusted.c rename to trunk/security/keys/trusted_defined.c index 83fc92e297cd..2836c6dc18a3 100644 --- a/trunk/security/keys/trusted.c +++ b/trunk/security/keys/trusted_defined.c @@ -29,7 +29,7 @@ #include #include -#include "trusted.h" +#include "trusted_defined.h" static const char hmac_alg[] = "hmac(sha1)"; static const char hash_alg[] = "sha1"; @@ -1032,7 +1032,6 @@ static int trusted_update(struct key *key, const void *data, size_t datalen) ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; - kfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ diff --git a/trunk/security/keys/trusted.h b/trunk/security/keys/trusted_defined.h similarity index 100% rename from trunk/security/keys/trusted.h rename to trunk/security/keys/trusted_defined.h diff --git a/trunk/security/selinux/ss/conditional.c b/trunk/security/selinux/ss/conditional.c index a53373207fb4..c3f845cbcd48 100644 --- a/trunk/security/selinux/ss/conditional.c +++ b/trunk/security/selinux/ss/conditional.c @@ -178,7 +178,7 @@ int cond_init_bool_indexes(struct policydb *p) p->bool_val_to_struct = (struct cond_bool_datum **) kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL); if (!p->bool_val_to_struct) - return -ENOMEM; + return -1; return 0; } diff --git a/trunk/security/selinux/ss/policydb.c b/trunk/security/selinux/ss/policydb.c index 57363562f0f8..be9de3872837 100644 --- a/trunk/security/selinux/ss/policydb.c +++ b/trunk/security/selinux/ss/policydb.c @@ -501,8 +501,8 @@ static int policydb_index(struct policydb *p) if (rc) goto out; - rc = cond_init_bool_indexes(p); - if (rc) + rc = -ENOMEM; + if 
(cond_init_bool_indexes(p)) goto out; for (i = 0; i < SYM_NUM; i++) { diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 7141c42e1469..2b5387d53ba5 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -204,11 +204,13 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls +EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef +EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations @@ -292,13 +294,6 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) CFLAGS := $(CFLAGS) -fstack-protector-all endif -ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y) - CFLAGS := $(CFLAGS) -Wstack-protector -endif - -ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y) - CFLAGS := $(CFLAGS) -Wvolatile-register-var -endif ### --- END CONFIGURATION SECTION --- diff --git a/trunk/tools/perf/builtin-annotate.c b/trunk/tools/perf/builtin-annotate.c index 8879463807e4..c056cdc06912 100644 --- a/trunk/tools/perf/builtin-annotate.c +++ b/trunk/tools/perf/builtin-annotate.c @@ -212,7 +212,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename) continue; offset = start + i; - sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset); + sprintf(cmd, "addr2line -e %s %016llx", filename, offset); fp = popen(cmd, "r"); if (!fp) continue; @@ -270,9 +270,9 @@ static void hist_entry__print_hits(struct hist_entry *self) for (offset = 0; offset < len; ++offset) if (h->ip[offset] != 0) - printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, + printf("%*Lx: %Lu\n", BITS_PER_LONG / 2, sym->start + offset, h->ip[offset]); - printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); + printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum); } static int hist_entry__tty_annotate(struct hist_entry *he) diff --git a/trunk/tools/perf/builtin-kmem.c b/trunk/tools/perf/builtin-kmem.c index d97256d65980..def7ddc2fd4f 100644 --- a/trunk/tools/perf/builtin-kmem.c +++ b/trunk/tools/perf/builtin-kmem.c @@ -371,10 +371,10 @@ static void __print_result(struct rb_root *root, struct perf_session *session, addr = data->ptr; if (sym != NULL) - snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name, + snprintf(buf, sizeof(buf), "%s+%Lx", sym->name, addr - map->unmap_ip(map, sym->start)); else - snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr); + snprintf(buf, sizeof(buf), "%#Lx", addr); printf(" %-34s |", buf); printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n", diff --git a/trunk/tools/perf/builtin-lock.c b/trunk/tools/perf/builtin-lock.c index 2b36defc5d73..b9c6e5432971 100644 --- a/trunk/tools/perf/builtin-lock.c +++ b/trunk/tools/perf/builtin-lock.c @@ -782,9 +782,9 @@ static void print_result(void) pr_info("%10u ", st->nr_acquired); pr_info("%10u ", st->nr_contended); - pr_info("%15" PRIu64 " ", st->wait_time_total); - pr_info("%15" PRIu64 " ", st->wait_time_max); - pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ? 
+ pr_info("%15llu ", st->wait_time_total); + pr_info("%15llu ", st->wait_time_max); + pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ? 0 : st->wait_time_min); pr_info("\n"); } diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c index b2f729fdb317..fcd29e8af29f 100644 --- a/trunk/tools/perf/builtin-record.c +++ b/trunk/tools/perf/builtin-record.c @@ -817,7 +817,7 @@ static int __cmd_record(int argc, const char **argv) * Approximate RIP event size: 24 bytes. */ fprintf(stderr, - "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", + "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n", (double)bytes_written / 1024.0 / 1024.0, output_name, bytes_written / 24); diff --git a/trunk/tools/perf/builtin-report.c b/trunk/tools/perf/builtin-report.c index c27e31f289e6..75183a4518e6 100644 --- a/trunk/tools/perf/builtin-report.c +++ b/trunk/tools/perf/builtin-report.c @@ -197,7 +197,7 @@ static int process_read_event(event_t *event, struct sample_data *sample __used, event->read.value); } - dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid, + dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid, attr ? __event_name(attr->type, attr->config) : "FAIL", event->read.value); diff --git a/trunk/tools/perf/builtin-sched.c b/trunk/tools/perf/builtin-sched.c index 29acb894e035..29e7ffd85690 100644 --- a/trunk/tools/perf/builtin-sched.c +++ b/trunk/tools/perf/builtin-sched.c @@ -193,7 +193,7 @@ static void calibrate_run_measurement_overhead(void) } run_measurement_overhead = min_delta; - printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); + printf("run measurement overhead: %Ld nsecs\n", min_delta); } static void calibrate_sleep_measurement_overhead(void) @@ -211,7 +211,7 @@ static void calibrate_sleep_measurement_overhead(void) min_delta -= 10000; sleep_measurement_overhead = min_delta; - printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); + printf("sleep measurement overhead: %Ld nsecs\n", min_delta); } static struct sched_atom * @@ -617,13 +617,13 @@ static void test_calibrations(void) burn_nsecs(1e6); T1 = get_nsecs(); - printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); + printf("the run test took %Ld nsecs\n", T1-T0); T0 = get_nsecs(); sleep_nsecs(1e6); T1 = get_nsecs(); - printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); + printf("the sleep test took %Ld nsecs\n", T1-T0); } #define FILL_FIELD(ptr, field, event, data) \ @@ -816,10 +816,10 @@ replay_switch_event(struct trace_switch_event *switch_event, delta = 0; if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); + die("hm, delta: %Ld < 0 ?\n", delta); if (verbose) { - printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", + printf(" ... 
switch from %s/%d to %s/%d [ran %Ld nsecs]\n", switch_event->prev_comm, switch_event->prev_pid, switch_event->next_comm, switch_event->next_pid, delta); @@ -1048,7 +1048,7 @@ latency_switch_event(struct trace_switch_event *switch_event, delta = 0; if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); + die("hm, delta: %Ld < 0 ?\n", delta); sched_out = perf_session__findnew(session, switch_event->prev_pid); @@ -1221,7 +1221,7 @@ static void output_lat_thread(struct work_atoms *work_list) avg = work_list->total_lat / work_list->nb_atoms; - printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n", + printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n", (double)work_list->total_runtime / 1e6, work_list->nb_atoms, (double)avg / 1e6, (double)work_list->max_lat / 1e6, @@ -1423,7 +1423,7 @@ map_switch_event(struct trace_switch_event *switch_event, delta = 0; if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); + die("hm, delta: %Ld < 0 ?\n", delta); sched_out = perf_session__findnew(session, switch_event->prev_pid); @@ -1713,7 +1713,7 @@ static void __cmd_lat(void) } printf(" -----------------------------------------------------------------------------------------\n"); - printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", + printf(" TOTAL: |%11.3f ms |%9Ld |\n", (double)all_runtime/1e6, all_count); printf(" ---------------------------------------------------\n"); diff --git a/trunk/tools/perf/builtin-script.c b/trunk/tools/perf/builtin-script.c index b766c2a9ac97..150a606002eb 100644 --- a/trunk/tools/perf/builtin-script.c +++ b/trunk/tools/perf/builtin-script.c @@ -77,8 +77,8 @@ static int process_sample_event(event_t *event, struct sample_data *sample, if (session->sample_type & PERF_SAMPLE_RAW) { if (debug_mode) { if (sample->time < last_timestamp) { - pr_err("Samples misordered, previous: %" PRIu64 - " this: %" PRIu64 "\n", last_timestamp, + pr_err("Samples misordered, previous: %llu " + "this: %llu\n", last_timestamp, sample->time); nr_unordered++; } @@ -126,7 +126,7 @@ static int __cmd_script(struct perf_session *session) ret = perf_session__process_events(session, &event_ops); if (debug_mode) - pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); + pr_err("Misordered timestamps: %llu\n", nr_unordered); return ret; } diff --git a/trunk/tools/perf/builtin-stat.c b/trunk/tools/perf/builtin-stat.c index a482a191a0ca..0ff11d9b13be 100644 --- a/trunk/tools/perf/builtin-stat.c +++ b/trunk/tools/perf/builtin-stat.c @@ -206,8 +206,8 @@ static int read_counter_aggr(struct perf_evsel *counter) update_stats(&ps->res_stats[i], count[i]); if (verbose) { - fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", - event_name(counter), count[0], count[1], count[2]); + fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter), + count[0], count[1], count[2]); } /* diff --git a/trunk/tools/perf/builtin-test.c b/trunk/tools/perf/builtin-test.c index 5dcdba653d70..ed5696198d3d 100644 --- a/trunk/tools/perf/builtin-test.c +++ b/trunk/tools/perf/builtin-test.c @@ -146,7 +146,7 @@ static int test__vmlinux_matches_kallsyms(void) if (llabs(skew) < page_size) continue; - pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", + pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n", sym->start, sym->name, sym->end, pair->end); } else { struct rb_node *nnd; @@ -168,11 +168,11 @@ static int test__vmlinux_matches_kallsyms(void) goto detour; } - pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", + pr_debug("%#Lx: diff 
name v: %s k: %s\n", sym->start, sym->name, pair->name); } } else - pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name); + pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name); err = -1; } @@ -211,10 +211,10 @@ static int test__vmlinux_matches_kallsyms(void) if (pair->start == pos->start) { pair->priv = 1; - pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", + pr_info(" %Lx-%Lx %Lx %s in kallsyms as", pos->start, pos->end, pos->pgoff, pos->dso->name); if (pos->pgoff != pair->pgoff || pos->end != pair->end) - pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "", + pr_info(": \n*%Lx-%Lx %Lx", pair->start, pair->end, pair->pgoff); pr_info(" %s\n", pair->dso->name); pair->priv = 1; @@ -307,7 +307,7 @@ static int test__open_syscall_event(void) } if (evsel->counts->cpu[0].val != nr_open_calls) { - pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", + pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n", nr_open_calls, evsel->counts->cpu[0].val); goto out_close_fd; } @@ -332,7 +332,8 @@ static int test__open_syscall_event_on_all_cpus(void) struct perf_evsel *evsel; struct perf_event_attr attr; unsigned int nr_open_calls = 111, i; - cpu_set_t cpu_set; + cpu_set_t *cpu_set; + size_t cpu_set_size; int id = trace_event__id("sys_enter_open"); if (id < 0) { @@ -352,8 +353,13 @@ static int test__open_syscall_event_on_all_cpus(void) return -1; } + cpu_set = CPU_ALLOC(cpus->nr); - CPU_ZERO(&cpu_set); + if (cpu_set == NULL) + goto out_thread_map_delete; + + cpu_set_size = CPU_ALLOC_SIZE(cpus->nr); + CPU_ZERO_S(cpu_set_size, cpu_set); memset(&attr, 0, sizeof(attr)); attr.type = PERF_TYPE_TRACEPOINT; @@ -361,7 +367,7 @@ static int test__open_syscall_event_on_all_cpus(void) evsel = perf_evsel__new(&attr, 0); if (evsel == NULL) { pr_debug("perf_evsel__new\n"); - goto out_thread_map_delete; + goto out_cpu_free; } if (perf_evsel__open(evsel, cpus, threads) < 0) { @@ -373,29 +379,14 @@ static int test__open_syscall_event_on_all_cpus(void) for (cpu = 0; cpu < cpus->nr; ++cpu) { unsigned int ncalls = nr_open_calls + cpu; - /* - * XXX eventually lift this restriction in a way that - * keeps perf building on older glibc installations - * without CPU_ALLOC. 
1024 cpus in 2010 still seems - * a reasonable upper limit tho :-) - */ - if (cpus->map[cpu] >= CPU_SETSIZE) { - pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); - continue; - } - CPU_SET(cpus->map[cpu], &cpu_set); - if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { - pr_debug("sched_setaffinity() failed on CPU %d: %s ", - cpus->map[cpu], - strerror(errno)); - goto out_close_fd; - } + CPU_SET(cpu, cpu_set); + sched_setaffinity(0, cpu_set_size, cpu_set); for (i = 0; i < ncalls; ++i) { fd = open("/etc/passwd", O_RDONLY); close(fd); } - CPU_CLR(cpus->map[cpu], &cpu_set); + CPU_CLR(cpu, cpu_set); } /* @@ -411,9 +402,6 @@ static int test__open_syscall_event_on_all_cpus(void) for (cpu = 0; cpu < cpus->nr; ++cpu) { unsigned int expected; - if (cpus->map[cpu] >= CPU_SETSIZE) - continue; - if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { pr_debug("perf_evsel__open_read_on_cpu\n"); goto out_close_fd; @@ -421,8 +409,8 @@ static int test__open_syscall_event_on_all_cpus(void) expected = nr_open_calls + cpu; if (evsel->counts->cpu[cpu].val != expected) { - pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", - expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); + pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n", + expected, cpu, evsel->counts->cpu[cpu].val); goto out_close_fd; } } @@ -432,6 +420,8 @@ static int test__open_syscall_event_on_all_cpus(void) perf_evsel__close_fd(evsel, 1, threads->nr); out_evsel_delete: perf_evsel__delete(evsel); +out_cpu_free: + CPU_FREE(cpu_set); out_thread_map_delete: thread_map__delete(threads); return err; diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c index b6998e055767..05344c6210ac 100644 --- a/trunk/tools/perf/builtin-top.c +++ b/trunk/tools/perf/builtin-top.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include @@ -215,7 +214,7 @@ static int parse_source(struct sym_entry *syme) len = sym->end - sym->start; sprintf(command, - "objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s", + "objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s", BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start), BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path); @@ -309,7 +308,7 @@ static void lookup_sym_source(struct sym_entry *syme) struct source_line *line; char pattern[PATTERN_LEN + 1]; - sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4, + sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4, map__rip_2objdump(syme->map, symbol->start)); pthread_mutex_lock(&syme->src->lock); @@ -538,7 +537,7 @@ static void print_sym_table(void) if (nr_counters == 1 || !display_weighted) { struct perf_evsel *first; first = list_entry(evsel_list.next, struct perf_evsel, node); - printf("%" PRIu64, (uint64_t)first->attr.sample_period); + printf("%Ld", first->attr.sample_period); if (freq) printf("Hz "); else @@ -641,7 +640,7 @@ static void print_sym_table(void) percent_color_fprintf(stdout, "%4.1f%%", pcnt); if (verbose) - printf(" %016" PRIx64, sym->start); + printf(" %016llx", sym->start); printf(" %-*.*s", sym_width, sym_width, sym->name); printf(" %-*.*s\n", dso_width, dso_width, dso_width >= syme->map->dso->long_name_len ? 
diff --git a/trunk/tools/perf/util/event.c b/trunk/tools/perf/util/event.c index 1478ab4ee222..2302ec051bb4 100644 --- a/trunk/tools/perf/util/event.c +++ b/trunk/tools/perf/util/event.c @@ -459,8 +459,7 @@ int event__process_comm(event_t *self, struct sample_data *sample __used, int event__process_lost(event_t *self, struct sample_data *sample __used, struct perf_session *session) { - dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", - self->lost.id, self->lost.lost); + dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); session->hists.stats.total_lost += self->lost.lost; return 0; } @@ -576,7 +575,7 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used, u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; int ret = 0; - dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", + dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n", self->mmap.pid, self->mmap.tid, self->mmap.start, self->mmap.len, self->mmap.pgoff, self->mmap.filename); diff --git a/trunk/tools/perf/util/header.c b/trunk/tools/perf/util/header.c index f6a929e74981..989fa2dee2fd 100644 --- a/trunk/tools/perf/util/header.c +++ b/trunk/tools/perf/util/header.c @@ -798,8 +798,8 @@ static int perf_file_section__process(struct perf_file_section *self, int feat, int fd) { if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) { - pr_debug("Failed to lseek to %" PRIu64 " offset for feature " - "%d, continuing...\n", self->offset, feat); + pr_debug("Failed to lseek to %Ld offset for feature %d, " + "continuing...\n", self->offset, feat); return 0; } diff --git a/trunk/tools/perf/util/hist.c b/trunk/tools/perf/util/hist.c index 32f4f1f2f6e4..c749ba6136a0 100644 --- a/trunk/tools/perf/util/hist.c +++ b/trunk/tools/perf/util/hist.c @@ -636,13 +636,13 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, } } } else - ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period); + ret = snprintf(s, size, sep ? 
"%lld" : "%12lld ", period); if (symbol_conf.show_nr_samples) { if (sep) - ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); + ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period); else - ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); + ret += snprintf(s + ret, size - ret, "%11lld", period); } if (pair_hists) { @@ -971,7 +971,7 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip) sym_size = sym->end - sym->start; offset = ip - sym->start; - pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip)); + pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip)); if (offset >= sym_size) return 0; @@ -980,9 +980,8 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip) h->sum++; h->ip[offset]++; - pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64 - "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name, - ip, ip - self->ms.sym->start, h->ip[offset]); + pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start, + self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]); return 0; } @@ -1133,7 +1132,7 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head, goto out_free_filename; } - pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, + pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__, filename, sym->name, map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end)); @@ -1143,7 +1142,7 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head, dso, dso->long_name, sym, sym->name); snprintf(command, sizeof(command), - "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand", + "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand", map__rip_2objdump(map, sym->start), map__rip_2objdump(map, sym->end), symfs_filename, filename); diff --git a/trunk/tools/perf/util/include/linux/bitops.h b/trunk/tools/perf/util/include/linux/bitops.h index 305c8484f200..8be0b968ca0b 100644 --- a/trunk/tools/perf/util/include/linux/bitops.h +++ b/trunk/tools/perf/util/include/linux/bitops.h @@ -2,7 +2,6 @@ #define _PERF_LINUX_BITOPS_H_ #include -#include #include #define BITS_PER_LONG __WORDSIZE diff --git a/trunk/tools/perf/util/map.c b/trunk/tools/perf/util/map.c index a16ecab5229d..3a7eb6ec0eec 100644 --- a/trunk/tools/perf/util/map.c +++ b/trunk/tools/perf/util/map.c @@ -1,6 +1,5 @@ #include "symbol.h" #include -#include #include #include #include @@ -196,7 +195,7 @@ int map__overlap(struct map *l, struct map *r) size_t map__fprintf(struct map *self, FILE *fp) { - return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", + return fprintf(fp, " %Lx-%Lx %Lx %s\n", self->start, self->end, self->pgoff, self->dso->name); } diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 135f69baf966..bc2732ee23eb 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -279,7 +279,7 @@ const char *__event_name(int type, u64 config) static char buf[32]; if (type == PERF_TYPE_RAW) { - sprintf(buf, "raw 0x%" PRIx64, config); + sprintf(buf, "raw 0x%llx", config); return buf; } diff --git a/trunk/tools/perf/util/parse-events.h b/trunk/tools/perf/util/parse-events.h index 458e3ecf17af..b82cafb83772 100644 --- a/trunk/tools/perf/util/parse-events.h +++ b/trunk/tools/perf/util/parse-events.h @@ -23,7 +23,7 @@ struct 
tracepoint_path { }; extern struct tracepoint_path *tracepoint_id_to_path(u64 config); -extern bool have_tracepoints(struct list_head *evlist); +extern bool have_tracepoints(struct list_head *evsel_list); extern int nr_counters; diff --git a/trunk/tools/perf/util/probe-event.c b/trunk/tools/perf/util/probe-event.c index 6e29d9c9dccc..128aaab0aeda 100644 --- a/trunk/tools/perf/util/probe-event.c +++ b/trunk/tools/perf/util/probe-event.c @@ -172,7 +172,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, sym = __find_kernel_function_by_name(tp->symbol, &map); if (sym) { addr = map->unmap_ip(map, sym->start + tp->offset); - pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol, + pr_debug("try to find %s+%ld@%llx\n", tp->symbol, tp->offset, addr); ret = find_perf_probe_point((unsigned long)addr, pp); } diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c index 105f00bfd555..313dac2d94ce 100644 --- a/trunk/tools/perf/util/session.c +++ b/trunk/tools/perf/util/session.c @@ -652,11 +652,10 @@ static void callchain__printf(struct sample_data *sample) { unsigned int i; - printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); + printf("... chain: nr:%Lu\n", sample->callchain->nr); for (i = 0; i < sample->callchain->nr; i++) - printf("..... %2d: %016" PRIx64 "\n", - i, sample->callchain->ips[i]); + printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]); } static void perf_session__print_tstamp(struct perf_session *session, @@ -673,7 +672,7 @@ static void perf_session__print_tstamp(struct perf_session *session, printf("%u ", sample->cpu); if (session->sample_type & PERF_SAMPLE_TIME) - printf("%" PRIu64 " ", sample->time); + printf("%Lu ", sample->time); } static void dump_event(struct perf_session *session, event_t *event, @@ -682,16 +681,16 @@ static void dump_event(struct perf_session *session, event_t *event, if (!dump_trace) return; - printf("\n%#" PRIx64 " [%#x]: event: %d\n", - file_offset, event->header.size, event->header.type); + printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size, + event->header.type); trace_event(event); if (sample) perf_session__print_tstamp(session, event, sample); - printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, - event->header.size, event__get_event_name(event->header.type)); + printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size, + event__get_event_name(event->header.type)); } static void dump_sample(struct perf_session *session, event_t *event, @@ -700,9 +699,8 @@ static void dump_sample(struct perf_session *session, event_t *event, if (!dump_trace) return; - printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n", - event->header.misc, sample->pid, sample->tid, sample->ip, - sample->period); + printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, + sample->pid, sample->tid, sample->ip, sample->period); if (session->sample_type & PERF_SAMPLE_CALLCHAIN) callchain__printf(sample); @@ -845,8 +843,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session, { if (ops->lost == event__process_lost && session->hists.stats.total_lost != 0) { - ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64 - "!\n\nCheck IO/CPU overload!\n\n", + ui__warning("Processed %Lu events and LOST %Lu!\n\n" + "Check IO/CPU overload!\n\n", session->hists.stats.total_period, session->hists.stats.total_lost); } @@ -920,7 +918,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self, if (size == 0 || (skip = 
perf_session__process_event(self, &event, ops, head)) < 0) { - dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", + dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", head, event.header.size, event.header.type); /* * assume we lost track of the stream, check alignment, and @@ -1025,7 +1023,7 @@ int __perf_session__process_events(struct perf_session *session, if (size == 0 || perf_session__process_event(session, event, ops, file_pos) < 0) { - dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", + dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", file_offset + head, event->header.size, event->header.type); /* diff --git a/trunk/tools/perf/util/svghelper.c b/trunk/tools/perf/util/svghelper.c index fb737fe9be91..b3637db025a2 100644 --- a/trunk/tools/perf/util/svghelper.c +++ b/trunk/tools/perf/util/svghelper.c @@ -12,7 +12,6 @@ * of the License. */ -#include #include #include #include @@ -44,11 +43,11 @@ static double cpu2y(int cpu) return cpu2slot(cpu) * SLOT_MULT; } -static double time2pixels(u64 __time) +static double time2pixels(u64 time) { double X; - X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time); + X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time); return X; } @@ -95,7 +94,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT; fprintf(svgfile, " \n"); - fprintf(svgfile, "\n", svg_page_width, total_height); + fprintf(svgfile, "\n", svg_page_width, total_height); fprintf(svgfile, "\n
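For reference, two recurring changes in the tools/perf hunks above can be seen in isolation. First, the printf conversions: the hunks drop the "%" PRIu64 / "%" PRIx64 macro form from inttypes.h in favor of glibc's %Lu/%Lx extensions. A minimal standalone sketch of the macro-based form, illustrative only and not taken from the patch:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t period = 123456789ULL;

		/* PRIu64 expands to the conversion that matches uint64_t on
		 * this ABI ("lu" on 64-bit glibc, "llu" on 32-bit), which is
		 * what keeps -Wformat quiet across architectures. */
		printf("period: %" PRIu64 "\n", period);
		return 0;
	}

Second, the builtin-test.c hunk switches to glibc's dynamically sized CPU affinity masks (CPU_ALLOC and friends). A minimal sketch of that interface, assuming a hypothetical count of 8 CPUs where perf would use cpus->nr:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int ncpus = 8;			/* hypothetical count; perf uses cpus->nr */
		cpu_set_t *set = CPU_ALLOC(ncpus);
		size_t size = CPU_ALLOC_SIZE(ncpus);

		if (set == NULL)
			return EXIT_FAILURE;

		CPU_ZERO_S(size, set);
		CPU_SET_S(0, size, set);	/* pin to the first CPU */

		/* pid 0 means the calling thread; the size must be the one
		 * returned by CPU_ALLOC_SIZE for this mask. */
		if (sched_setaffinity(0, size, set) < 0)
			perror("sched_setaffinity");

		CPU_FREE(set);
		return 0;
	}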