From 8d4a6adedbf720e6cb627a01f65e3aa4dcc5be22 Mon Sep 17 00:00:00 2001
From: Marcel Holtmann
Date: Mon, 18 Aug 2008 13:23:52 +0200
Subject: [PATCH]

--- yaml ---
r: 108849
b: refs/heads/master
c: 9bfa35fe422c74882e27cc54450a5f76c96aad68
h: refs/heads/master
i: 108847: 9303e010522c7feaff6d6b4ce06b39f89c8b3318
v: v3
---

 [refs] | 2 +-
 trunk/Documentation/devices.txt | 3 +
 trunk/Documentation/ioctl-number.txt | 1 +
 trunk/Documentation/rfkill.txt | 5 -
 trunk/Documentation/usb/auerswald.txt | 30 +
 trunk/Documentation/usb/power-management.txt | 7 +-
 trunk/MAINTAINERS | 13 +-
 trunk/arch/arm/mach-omap2/usb-tusb6010.c | 1 +
 trunk/arch/sparc/include/asm/irq_64.h | 4 -
 trunk/arch/sparc/include/asm/of_device.h | 3 +-
 trunk/arch/sparc64/kernel/irq.c | 52 -
 trunk/arch/sparc64/kernel/kstack.h | 60 -
 trunk/arch/sparc64/kernel/process.c | 27 +-
 trunk/arch/sparc64/kernel/smp.c | 4 +
 trunk/arch/sparc64/kernel/stacktrace.c | 13 +-
 trunk/arch/sparc64/kernel/traps.c | 7 +-
 trunk/arch/sparc64/lib/mcount.S | 39 +-
 trunk/arch/sparc64/mm/init.c | 11 -
 trunk/arch/sparc64/mm/ultra.S | 2 +
 trunk/crypto/digest.c | 2 +-
 trunk/crypto/tcrypt.c | 28 +-
 trunk/drivers/Makefile | 1 -
 trunk/drivers/bluetooth/Kconfig | 10 +-
 trunk/drivers/bluetooth/btusb.c | 282 +-
 trunk/drivers/char/hw_random/via-rng.c | 8 -
 trunk/drivers/char/random.c | 1 -
 trunk/drivers/crypto/padlock-aes.c | 28 +-
 trunk/drivers/crypto/padlock-sha.c | 9 -
 trunk/drivers/crypto/talitos.c | 54 +-
 trunk/drivers/i2c/chips/isp1301_omap.c | 2 +-
 trunk/drivers/input/serio/i8042-sparcio.h | 3 +-
 trunk/drivers/net/Kconfig | 2 +-
 trunk/drivers/net/acenic.c | 1 +
 trunk/drivers/net/arm/ixp4xx_eth.c | 6 +-
 trunk/drivers/net/atl1e/atl1e_ethtool.c | 2 +-
 trunk/drivers/net/au1000_eth.c | 2 +-
 trunk/drivers/net/ax88796.c | 4 +-
 trunk/drivers/net/bnx2.c | 47 +-
 trunk/drivers/net/bnx2x.h | 87 +-
 trunk/drivers/net/bnx2x_fw_defs.h | 160 +-
 trunk/drivers/net/bnx2x_hsi.h | 16 +-
 trunk/drivers/net/bnx2x_init.h | 26 +-
 trunk/drivers/net/bnx2x_init_values.h | 533 ++--
 trunk/drivers/net/bnx2x_link.c | 1259 ++++----
 trunk/drivers/net/bnx2x_link.h | 11 +-
 trunk/drivers/net/bnx2x_main.c | 1213 ++++-----
 trunk/drivers/net/bnx2x_reg.h | 210 +-
 trunk/drivers/net/cpmac.c | 1 +
 trunk/drivers/net/e1000e/defines.h | 2 +-
 trunk/drivers/net/e1000e/e1000.h | 1 -
 trunk/drivers/net/e1000e/ethtool.c | 2 +-
 trunk/drivers/net/e1000e/netdev.c | 185 +-
 trunk/drivers/net/e1000e/param.c | 25 +-
 trunk/drivers/net/gianfar.c | 6 +-
 trunk/drivers/net/gianfar_sysfs.c | 1 +
 trunk/drivers/net/ipg.h | 2 +
 trunk/drivers/net/ixgbe/ixgbe_82598.c | 1 -
 trunk/drivers/net/ixgbe/ixgbe_main.c | 4 +-
 trunk/drivers/net/ixgbe/ixgbe_type.h | 1 -
 trunk/drivers/net/loopback.c | 67 +
 trunk/drivers/net/myri10ge/myri10ge.c | 6 +-
 trunk/drivers/net/ne.c | 4 +-
 trunk/drivers/net/netxen/netxen_nic.h | 7 +-
 trunk/drivers/net/netxen/netxen_nic_hw.c | 59 +-
 trunk/drivers/net/netxen/netxen_nic_init.c | 28 +-
 trunk/drivers/net/netxen/netxen_nic_main.c | 210 +-
 .../drivers/net/netxen/netxen_nic_phan_reg.h | 2 -
 trunk/drivers/net/ppp_mppe.c | 1 +
 trunk/drivers/net/pppol2tp.c | 1 +
 trunk/drivers/net/r6040.c | 1 +
 trunk/drivers/net/sh_eth.c | 1 +
 trunk/drivers/net/sky2.c | 8 +-
 trunk/drivers/net/tehuti.h | 1 +
 trunk/drivers/net/tg3.c | 101 +-
 trunk/drivers/net/tg3.h | 6 -
 trunk/drivers/net/tlan.c | 8 +-
 trunk/drivers/net/tun.c | 105 +-
 trunk/drivers/net/typhoon.c | 1 +
 trunk/drivers/net/usb/Kconfig | 21 +-
 trunk/drivers/net/usb/hso.c | 53 +-
 trunk/drivers/net/wireless/ath5k/base.c | 9 +-
 trunk/drivers/net/wireless/ath9k/hw.c | 6 +-
trunk/drivers/net/wireless/b43/main.c | 3 +- trunk/drivers/net/wireless/ipw2100.c | 1 + trunk/drivers/net/wireless/ipw2200.c | 1 + trunk/drivers/net/wireless/iwlwifi/iwl-3945.c | 1 + trunk/drivers/net/wireless/iwlwifi/iwl-4965.c | 3 +- trunk/drivers/net/wireless/iwlwifi/iwl-5000.c | 1 + trunk/drivers/net/wireless/iwlwifi/iwl-agn.c | 1 + trunk/drivers/net/wireless/iwlwifi/iwl-core.c | 1 + .../drivers/net/wireless/iwlwifi/iwl-eeprom.c | 7 +- trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c | 1 + .../drivers/net/wireless/iwlwifi/iwl-power.c | 1 + trunk/drivers/net/wireless/iwlwifi/iwl-sta.c | 4 +- trunk/drivers/net/wireless/iwlwifi/iwl-tx.c | 4 +- .../net/wireless/iwlwifi/iwl3945-base.c | 7 +- trunk/drivers/net/wireless/p54/p54common.c | 51 +- trunk/drivers/net/wireless/p54/p54common.h | 18 +- trunk/drivers/net/wireless/p54/p54usb.c | 10 - .../drivers/net/wireless/rt2x00/rt2x00queue.h | 8 +- trunk/drivers/net/wireless/rt2x00/rt2x00usb.c | 1 - trunk/drivers/net/wireless/rtl8187_dev.c | 1 - trunk/drivers/sbus/sbus.c | 2 +- trunk/drivers/serial/sunhv.c | 2 +- trunk/drivers/serial/sunsab.c | 2 +- trunk/drivers/serial/sunsu.c | 2 +- trunk/drivers/serial/sunzilog.c | 2 +- trunk/drivers/ssb/main.c | 8 - trunk/drivers/usb/Kconfig | 6 +- trunk/drivers/usb/atm/cxacru.c | 2 +- trunk/drivers/usb/class/cdc-acm.c | 86 +- trunk/drivers/usb/class/cdc-acm.h | 3 +- trunk/drivers/usb/core/driver.c | 5 +- trunk/drivers/usb/core/message.c | 2 +- trunk/drivers/usb/gadget/Kconfig | 10 - trunk/drivers/usb/gadget/dummy_hcd.c | 5 +- trunk/drivers/usb/gadget/f_acm.c | 196 +- trunk/drivers/usb/gadget/f_ecm.c | 2 + trunk/drivers/usb/gadget/f_rndis.c | 2 + trunk/drivers/usb/gadget/f_serial.c | 2 + trunk/drivers/usb/gadget/f_subset.c | 2 + trunk/drivers/usb/gadget/gadget_chips.h | 6 - trunk/drivers/usb/gadget/omap_udc.c | 5 +- trunk/drivers/usb/gadget/u_serial.c | 290 +-- trunk/drivers/usb/gadget/u_serial.h | 12 +- trunk/drivers/usb/host/isp1760-hcd.c | 53 +- trunk/drivers/usb/host/isp1760-hcd.h | 5 - trunk/drivers/usb/host/ohci-hcd.c | 23 +- trunk/drivers/usb/host/ohci-hub.c | 11 +- trunk/drivers/usb/host/ohci-omap.c | 3 +- trunk/drivers/usb/host/ohci-pci.c | 132 - trunk/drivers/usb/host/ohci-q.c | 6 - trunk/drivers/usb/host/ohci.h | 11 - trunk/drivers/usb/host/r8a66597-hcd.c | 49 +- trunk/drivers/usb/misc/Kconfig | 10 + trunk/drivers/usb/misc/Makefile | 1 + trunk/drivers/usb/misc/auerswald.c | 2152 ++++++++++++++++ trunk/drivers/usb/musb/Kconfig | 176 -- trunk/drivers/usb/musb/Makefile | 86 - trunk/drivers/usb/musb/cppi_dma.c | 1540 ----------- trunk/drivers/usb/musb/cppi_dma.h | 133 - trunk/drivers/usb/musb/davinci.c | 462 ---- trunk/drivers/usb/musb/davinci.h | 100 - trunk/drivers/usb/musb/musb_core.c | 2261 ----------------- trunk/drivers/usb/musb/musb_core.h | 507 ---- trunk/drivers/usb/musb/musb_debug.h | 66 - trunk/drivers/usb/musb/musb_dma.h | 172 -- trunk/drivers/usb/musb/musb_gadget.c | 2031 --------------- trunk/drivers/usb/musb/musb_gadget.h | 108 - trunk/drivers/usb/musb/musb_gadget_ep0.c | 981 ------- trunk/drivers/usb/musb/musb_host.c | 2170 ---------------- trunk/drivers/usb/musb/musb_host.h | 110 - trunk/drivers/usb/musb/musb_io.h | 115 - trunk/drivers/usb/musb/musb_procfs.c | 830 ------ trunk/drivers/usb/musb/musb_regs.h | 300 --- trunk/drivers/usb/musb/musb_virthub.c | 425 ---- trunk/drivers/usb/musb/musbhsdma.c | 433 ---- trunk/drivers/usb/musb/omap2430.c | 324 --- trunk/drivers/usb/musb/omap2430.h | 56 - trunk/drivers/usb/musb/tusb6010.c | 1151 --------- trunk/drivers/usb/musb/tusb6010.h | 233 -- 
trunk/drivers/usb/musb/tusb6010_omap.c | 719 ------ trunk/drivers/usb/serial/Kconfig | 7 +- trunk/drivers/usb/serial/ftdi_sio.c | 6 - trunk/drivers/usb/serial/ftdi_sio.h | 7 - trunk/drivers/usb/serial/option.c | 44 +- trunk/drivers/usb/serial/pl2303.c | 1 + trunk/drivers/usb/serial/pl2303.h | 4 + trunk/drivers/usb/serial/sierra.c | 170 +- trunk/drivers/usb/serial/usb-serial.c | 7 +- trunk/drivers/usb/storage/Kconfig | 12 - trunk/drivers/usb/storage/Makefile | 1 - trunk/drivers/usb/storage/sierra_ms.c | 207 -- trunk/drivers/usb/storage/sierra_ms.h | 4 - trunk/drivers/usb/storage/transport.c | 17 +- trunk/drivers/usb/storage/unusual_devs.h | 40 +- trunk/drivers/usb/storage/usb.c | 3 - trunk/fs/dlm/config.c | 203 +- trunk/fs/dlm/user.c | 10 +- trunk/fs/xfs/linux-2.6/sema.h | 52 + trunk/fs/xfs/linux-2.6/xfs_aops.c | 3 +- trunk/fs/xfs/linux-2.6/xfs_buf.c | 16 +- trunk/fs/xfs/linux-2.6/xfs_buf.h | 4 +- trunk/fs/xfs/linux-2.6/xfs_export.c | 10 +- trunk/fs/xfs/linux-2.6/xfs_fs_subr.c | 6 +- trunk/fs/xfs/linux-2.6/xfs_ioctl.c | 4 +- trunk/fs/xfs/linux-2.6/xfs_iops.c | 192 +- trunk/fs/xfs/linux-2.6/xfs_iops.h | 15 +- trunk/fs/xfs/linux-2.6/xfs_linux.h | 6 +- trunk/fs/xfs/linux-2.6/xfs_lrw.c | 6 +- trunk/fs/xfs/linux-2.6/xfs_super.c | 189 +- trunk/fs/xfs/linux-2.6/xfs_super.h | 3 + trunk/fs/xfs/linux-2.6/xfs_vnode.c | 22 +- trunk/fs/xfs/linux-2.6/xfs_vnode.h | 65 +- trunk/fs/xfs/quota/xfs_dquot.c | 38 +- trunk/fs/xfs/quota/xfs_dquot.h | 29 +- trunk/fs/xfs/quota/xfs_dquot_item.c | 8 +- trunk/fs/xfs/quota/xfs_qm.c | 14 +- trunk/fs/xfs/quota/xfs_qm.h | 2 +- trunk/fs/xfs/quota/xfs_qm_bhv.c | 7 +- trunk/fs/xfs/quota/xfs_qm_syscalls.c | 4 +- trunk/fs/xfs/xfs_acl.c | 52 +- trunk/fs/xfs/xfs_acl.h | 14 +- trunk/fs/xfs/xfs_arch.h | 68 + trunk/fs/xfs/xfs_attr.c | 110 +- trunk/fs/xfs/xfs_attr.h | 1 - trunk/fs/xfs/xfs_attr_leaf.c | 75 +- trunk/fs/xfs/xfs_attr_leaf.h | 2 + trunk/fs/xfs/xfs_bit.c | 103 + trunk/fs/xfs/xfs_bit.h | 34 +- trunk/fs/xfs/xfs_bmap.c | 34 +- trunk/fs/xfs/xfs_btree.c | 105 +- trunk/fs/xfs/xfs_btree.h | 8 +- trunk/fs/xfs/xfs_buf_item.c | 4 +- trunk/fs/xfs/xfs_dfrag.c | 33 +- trunk/fs/xfs/xfs_error.c | 5 +- trunk/fs/xfs/xfs_error.h | 12 +- trunk/fs/xfs/xfs_filestream.c | 2 +- trunk/fs/xfs/xfs_ialloc_btree.c | 30 +- trunk/fs/xfs/xfs_iget.c | 48 +- trunk/fs/xfs/xfs_inode.c | 70 +- trunk/fs/xfs/xfs_inode.h | 46 +- trunk/fs/xfs/xfs_inode_item.c | 11 +- trunk/fs/xfs/xfs_itable.c | 4 +- trunk/fs/xfs/xfs_log.c | 86 +- trunk/fs/xfs/xfs_log.h | 2 +- trunk/fs/xfs/xfs_log_priv.h | 14 +- trunk/fs/xfs/xfs_log_recover.c | 7 +- trunk/fs/xfs/xfs_mount.c | 82 +- trunk/fs/xfs/xfs_mount.h | 17 +- trunk/fs/xfs/xfs_rtalloc.c | 19 +- trunk/fs/xfs/xfs_rw.c | 2 +- trunk/fs/xfs/xfs_trans.c | 75 +- trunk/fs/xfs/xfs_trans.h | 12 +- trunk/fs/xfs/xfs_trans_buf.c | 12 +- trunk/fs/xfs/xfs_trans_item.c | 66 +- trunk/fs/xfs/xfs_utils.c | 4 +- trunk/fs/xfs/xfs_utils.h | 3 + trunk/fs/xfs/xfs_vfsops.c | 13 +- trunk/fs/xfs/xfs_vnodeops.c | 198 +- .../include/asm => include/asm-h8300}/Kbuild | 0 .../include/asm => include/asm-h8300}/a.out.h | 0 .../asm => include/asm-h8300}/atomic.h | 0 .../asm => include/asm-h8300}/auxvec.h | 0 .../asm => include/asm-h8300}/bitops.h | 0 .../asm => include/asm-h8300}/bootinfo.h | 0 .../include/asm => include/asm-h8300}/bug.h | 0 .../include/asm => include/asm-h8300}/bugs.h | 0 .../asm => include/asm-h8300}/byteorder.h | 0 .../include/asm => include/asm-h8300}/cache.h | 0 .../asm => include/asm-h8300}/cachectl.h | 0 .../asm => include/asm-h8300}/cacheflush.h | 0 .../asm => 
include/asm-h8300}/checksum.h | 0 .../asm => include/asm-h8300}/cputime.h | 0 .../asm => include/asm-h8300}/current.h | 0 .../include/asm => include/asm-h8300}/dbg.h | 0 .../include/asm => include/asm-h8300}/delay.h | 0 .../asm => include/asm-h8300}/device.h | 0 .../include/asm => include/asm-h8300}/div64.h | 0 .../include/asm => include/asm-h8300}/dma.h | 0 .../include/asm => include/asm-h8300}/elf.h | 0 .../asm-h8300}/emergency-restart.h | 0 .../include/asm => include/asm-h8300}/errno.h | 0 .../include/asm => include/asm-h8300}/fb.h | 0 .../include/asm => include/asm-h8300}/fcntl.h | 0 .../include/asm => include/asm-h8300}/flat.h | 0 .../include/asm => include/asm-h8300}/fpu.h | 0 .../include/asm => include/asm-h8300}/futex.h | 0 .../include/asm => include/asm-h8300}/gpio.h | 0 .../asm => include/asm-h8300}/hardirq.h | 0 .../asm => include/asm-h8300}/hw_irq.h | 0 .../include/asm => include/asm-h8300}/io.h | 0 .../include/asm => include/asm-h8300}/ioctl.h | 0 .../asm => include/asm-h8300}/ioctls.h | 0 .../asm => include/asm-h8300}/ipcbuf.h | 0 .../include/asm => include/asm-h8300}/irq.h | 0 .../asm => include/asm-h8300}/irq_regs.h | 0 .../asm => include/asm-h8300}/kdebug.h | 0 .../asm => include/asm-h8300}/kmap_types.h | 0 .../asm => include/asm-h8300}/linkage.h | 0 .../include/asm => include/asm-h8300}/local.h | 0 .../asm => include/asm-h8300}/mc146818rtc.h | 0 .../include/asm => include/asm-h8300}/md.h | 0 .../include/asm => include/asm-h8300}/mman.h | 0 .../include/asm => include/asm-h8300}/mmu.h | 0 .../asm => include/asm-h8300}/mmu_context.h | 0 .../asm => include/asm-h8300}/module.h | 0 .../asm => include/asm-h8300}/msgbuf.h | 0 .../include/asm => include/asm-h8300}/mutex.h | 0 .../include/asm => include/asm-h8300}/page.h | 0 .../asm => include/asm-h8300}/page_offset.h | 0 .../include/asm => include/asm-h8300}/param.h | 0 .../include/asm => include/asm-h8300}/pci.h | 0 .../asm => include/asm-h8300}/percpu.h | 0 .../asm => include/asm-h8300}/pgalloc.h | 0 .../asm => include/asm-h8300}/pgtable.h | 0 .../include/asm => include/asm-h8300}/poll.h | 0 .../asm => include/asm-h8300}/posix_types.h | 0 .../asm => include/asm-h8300}/processor.h | 0 .../asm => include/asm-h8300}/ptrace.h | 0 .../asm => include/asm-h8300}/regs267x.h | 0 .../asm => include/asm-h8300}/regs306x.h | 0 .../asm => include/asm-h8300}/resource.h | 0 .../asm => include/asm-h8300}/scatterlist.h | 0 .../asm => include/asm-h8300}/sections.h | 0 .../asm => include/asm-h8300}/segment.h | 0 .../asm => include/asm-h8300}/sembuf.h | 0 .../include/asm => include/asm-h8300}/setup.h | 0 .../asm => include/asm-h8300}/sh_bios.h | 0 .../include/asm => include/asm-h8300}/shm.h | 0 .../asm => include/asm-h8300}/shmbuf.h | 0 .../asm => include/asm-h8300}/shmparam.h | 0 .../asm => include/asm-h8300}/sigcontext.h | 0 .../asm => include/asm-h8300}/siginfo.h | 0 .../asm => include/asm-h8300}/signal.h | 0 .../include/asm => include/asm-h8300}/smp.h | 0 .../asm => include/asm-h8300}/socket.h | 0 .../asm => include/asm-h8300}/sockios.h | 0 .../asm => include/asm-h8300}/spinlock.h | 0 .../include/asm => include/asm-h8300}/stat.h | 0 .../asm => include/asm-h8300}/statfs.h | 0 .../asm => include/asm-h8300}/string.h | 0 .../asm => include/asm-h8300}/system.h | 0 .../asm => include/asm-h8300}/target_time.h | 0 .../asm => include/asm-h8300}/termbits.h | 0 .../asm => include/asm-h8300}/termios.h | 0 .../asm => include/asm-h8300}/thread_info.h | 0 .../include/asm => include/asm-h8300}/timex.h | 0 .../include/asm => include/asm-h8300}/tlb.h | 0 .../asm 
=> include/asm-h8300}/tlbflush.h | 0 .../asm => include/asm-h8300}/topology.h | 0 .../include/asm => include/asm-h8300}/traps.h | 0 .../include/asm => include/asm-h8300}/types.h | 0 .../asm => include/asm-h8300}/uaccess.h | 0 .../asm => include/asm-h8300}/ucontext.h | 0 .../asm => include/asm-h8300}/unaligned.h | 0 .../asm => include/asm-h8300}/unistd.h | 0 .../include/asm => include/asm-h8300}/user.h | 0 .../asm => include/asm-h8300}/virtconvert.h | 0 trunk/include/asm-x86/i387.h | 32 - trunk/include/crypto/hash.h | 18 - trunk/include/linux/completion.h | 45 - trunk/include/linux/cred.h | 50 - trunk/include/linux/if_tun.h | 1 - trunk/include/linux/sched.h | 1 - trunk/include/linux/skbuff.h | 10 +- trunk/include/linux/usb.h | 2 - trunk/include/linux/usb/musb.h | 98 - trunk/include/linux/usb/serial.h | 3 +- trunk/include/net/addrconf.h | 3 +- trunk/include/net/ip6_route.h | 7 +- trunk/include/net/ip_vs.h | 32 +- trunk/include/net/mac80211.h | 11 +- trunk/include/net/pkt_sched.h | 5 +- trunk/include/net/sch_generic.h | 2 +- trunk/net/bridge/br_device.c | 15 +- trunk/net/core/datagram.c | 87 - trunk/net/core/dev.c | 47 +- trunk/net/core/pktgen.c | 29 - trunk/net/core/skbuff.c | 12 +- trunk/net/dccp/input.c | 12 +- trunk/net/dccp/proto.c | 5 - trunk/net/ipv4/igmp.c | 71 +- trunk/net/ipv4/ipvs/ip_vs_app.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_conn.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_ctl.c | 27 +- trunk/net/ipv4/ipvs/ip_vs_dh.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_est.c | 116 +- trunk/net/ipv4/ipvs/ip_vs_lblc.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_lblcr.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_lc.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_nq.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_proto.c | 4 +- trunk/net/ipv4/ipvs/ip_vs_rr.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_sched.c | 4 +- trunk/net/ipv4/ipvs/ip_vs_sed.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_sh.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_sync.c | 4 +- trunk/net/ipv4/ipvs/ip_vs_wlc.c | 2 +- trunk/net/ipv4/ipvs/ip_vs_wrr.c | 2 +- trunk/net/ipv4/netfilter/ipt_addrtype.c | 2 +- .../net/ipv4/netfilter/nf_nat_proto_common.c | 8 +- trunk/net/ipv4/route.c | 76 +- trunk/net/ipv4/udp.c | 6 +- trunk/net/ipv6/addrconf.c | 3 +- trunk/net/ipv6/fib6_rules.c | 3 +- trunk/net/ipv6/ip6_fib.c | 1 - trunk/net/ipv6/ip6_output.c | 2 +- trunk/net/ipv6/ipv6_sockglue.c | 4 +- trunk/net/ipv6/ndisc.c | 2 +- trunk/net/ipv6/route.c | 13 +- trunk/net/ipv6/udp.c | 6 +- trunk/net/ipv6/xfrm6_policy.c | 4 +- trunk/net/mac80211/mlme.c | 2 - trunk/net/netfilter/nf_conntrack_netlink.c | 36 +- trunk/net/rfkill/rfkill.c | 14 +- trunk/net/rxrpc/ar-accept.c | 2 +- trunk/net/sched/act_api.c | 13 +- trunk/net/sched/cls_api.c | 2 +- trunk/net/sched/sch_api.c | 69 +- trunk/net/sched/sch_cbq.c | 2 +- trunk/net/sched/sch_generic.c | 73 +- trunk/net/sched/sch_htb.c | 7 +- trunk/net/sched/sch_prio.c | 4 +- trunk/net/sched/sch_tbf.c | 11 +- trunk/net/sctp/ipv6.c | 3 +- trunk/net/tipc/subscr.c | 2 +- trunk/net/wireless/wext.c | 1 - trunk/net/xfrm/xfrm_output.c | 5 +- 409 files changed, 6596 insertions(+), 21430 deletions(-) create mode 100644 trunk/Documentation/usb/auerswald.txt delete mode 100644 trunk/arch/sparc64/kernel/kstack.h create mode 100644 trunk/drivers/usb/misc/auerswald.c delete mode 100644 trunk/drivers/usb/musb/Kconfig delete mode 100644 trunk/drivers/usb/musb/Makefile delete mode 100644 trunk/drivers/usb/musb/cppi_dma.c delete mode 100644 trunk/drivers/usb/musb/cppi_dma.h delete mode 100644 trunk/drivers/usb/musb/davinci.c delete mode 100644 trunk/drivers/usb/musb/davinci.h delete mode 100644 
trunk/drivers/usb/musb/musb_core.c delete mode 100644 trunk/drivers/usb/musb/musb_core.h delete mode 100644 trunk/drivers/usb/musb/musb_debug.h delete mode 100644 trunk/drivers/usb/musb/musb_dma.h delete mode 100644 trunk/drivers/usb/musb/musb_gadget.c delete mode 100644 trunk/drivers/usb/musb/musb_gadget.h delete mode 100644 trunk/drivers/usb/musb/musb_gadget_ep0.c delete mode 100644 trunk/drivers/usb/musb/musb_host.c delete mode 100644 trunk/drivers/usb/musb/musb_host.h delete mode 100644 trunk/drivers/usb/musb/musb_io.h delete mode 100644 trunk/drivers/usb/musb/musb_procfs.c delete mode 100644 trunk/drivers/usb/musb/musb_regs.h delete mode 100644 trunk/drivers/usb/musb/musb_virthub.c delete mode 100644 trunk/drivers/usb/musb/musbhsdma.c delete mode 100644 trunk/drivers/usb/musb/omap2430.c delete mode 100644 trunk/drivers/usb/musb/omap2430.h delete mode 100644 trunk/drivers/usb/musb/tusb6010.c delete mode 100644 trunk/drivers/usb/musb/tusb6010.h delete mode 100644 trunk/drivers/usb/musb/tusb6010_omap.c delete mode 100644 trunk/drivers/usb/storage/sierra_ms.c delete mode 100644 trunk/drivers/usb/storage/sierra_ms.h create mode 100644 trunk/fs/xfs/linux-2.6/sema.h rename trunk/{arch/h8300/include/asm => include/asm-h8300}/Kbuild (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/a.out.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/atomic.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/auxvec.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/bitops.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/bootinfo.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/bug.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/bugs.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/byteorder.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/cache.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/cachectl.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/cacheflush.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/checksum.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/cputime.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/current.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/dbg.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/delay.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/device.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/div64.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/dma.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/elf.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/emergency-restart.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/errno.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/fb.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/fcntl.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/flat.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/fpu.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/futex.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/gpio.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/hardirq.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/hw_irq.h (100%) rename trunk/{arch/h8300/include/asm => 
include/asm-h8300}/io.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/ioctl.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/ioctls.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/ipcbuf.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/irq.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/irq_regs.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/kdebug.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/kmap_types.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/linkage.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/local.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/mc146818rtc.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/md.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/mman.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/mmu.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/mmu_context.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/module.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/msgbuf.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/mutex.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/page.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/page_offset.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/param.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/pci.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/percpu.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/pgalloc.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/pgtable.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/poll.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/posix_types.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/processor.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/ptrace.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/regs267x.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/regs306x.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/resource.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/scatterlist.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/sections.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/segment.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/sembuf.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/setup.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/sh_bios.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/shm.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/shmbuf.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/shmparam.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/sigcontext.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/siginfo.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/signal.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/smp.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/socket.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/sockios.h (100%) rename trunk/{arch/h8300/include/asm => 
include/asm-h8300}/spinlock.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/stat.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/statfs.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/string.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/system.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/target_time.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/termbits.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/termios.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/thread_info.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/timex.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/tlb.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/tlbflush.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/topology.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/traps.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/types.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/uaccess.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/ucontext.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/unaligned.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/unistd.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/user.h (100%) rename trunk/{arch/h8300/include/asm => include/asm-h8300}/virtconvert.h (100%) delete mode 100644 trunk/include/linux/cred.h delete mode 100644 trunk/include/linux/usb/musb.h diff --git a/[refs] b/[refs] index 518b77a75caa..f95a91d03a15 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: f3b9605d744df537dee10fd06630f35a62b343ec +refs/heads/master: 9bfa35fe422c74882e27cc54450a5f76c96aad68 diff --git a/trunk/Documentation/devices.txt b/trunk/Documentation/devices.txt index 05c80645e4ee..e6244cde26e9 100644 --- a/trunk/Documentation/devices.txt +++ b/trunk/Documentation/devices.txt @@ -2560,6 +2560,9 @@ Your cooperation is appreciated. 96 = /dev/usb/hiddev0 1st USB HID device ... 111 = /dev/usb/hiddev15 16th USB HID device + 112 = /dev/usb/auer0 1st auerswald ISDN device + ... + 127 = /dev/usb/auer15 16th auerswald ISDN device 128 = /dev/usb/brlvgr0 First Braille Voyager device ... 131 = /dev/usb/brlvgr3 Fourth Braille Voyager device diff --git a/trunk/Documentation/ioctl-number.txt b/trunk/Documentation/ioctl-number.txt index 1c6b545635a2..3bb5f466a90d 100644 --- a/trunk/Documentation/ioctl-number.txt +++ b/trunk/Documentation/ioctl-number.txt @@ -105,6 +105,7 @@ Code Seq# Include File Comments 'T' all linux/soundcard.h conflict! 'T' all asm-i386/ioctls.h conflict! 'U' 00-EF linux/drivers/usb/usb.h +'U' F0-FF drivers/usb/auerswald.c 'V' all linux/vt.h 'W' 00-1F linux/watchdog.h conflict! 'W' 00-1F linux/wanrouter.h conflict! diff --git a/trunk/Documentation/rfkill.txt b/trunk/Documentation/rfkill.txt index 6fcb3060dec5..28b6ec87c642 100644 --- a/trunk/Documentation/rfkill.txt +++ b/trunk/Documentation/rfkill.txt @@ -363,11 +363,6 @@ This rule exists because users of the rfkill subsystem expect to get (and set, when possible) the overall transmitter rfkill state, not of a particular rfkill line. -5. During suspend, the rfkill class will attempt to soft-block the radio -through a call to rfkill->toggle_radio, and will try to restore its previous -state during resume. 
After a rfkill class is suspended, it will *not* call -rfkill->toggle_radio until it is resumed. - Example of a WLAN wireless driver connected to the rfkill subsystem: -------------------------------------------------------------------- diff --git a/trunk/Documentation/usb/auerswald.txt b/trunk/Documentation/usb/auerswald.txt new file mode 100644 index 000000000000..7ee4d8f69116 --- /dev/null +++ b/trunk/Documentation/usb/auerswald.txt @@ -0,0 +1,30 @@ + Auerswald USB kernel driver + =========================== + +What is it? What can I do with it? +================================== +The auerswald USB kernel driver connects your linux 2.4.x +system to the auerswald usb-enabled devices. + +There are two types of auerswald usb devices: +a) small PBX systems (ISDN) +b) COMfort system telephones (ISDN) + +The driver installation creates the devices +/dev/usb/auer0..15. These devices carry a vendor- +specific protocol. You may run all auerswald java +software on it. The java software needs a native +library "libAuerUsbJNINative.so" installed on +your system. This library is available from +auerswald and shipped as part of the java software. + +You may create the devices with: + mknod -m 666 /dev/usb/auer0 c 180 112 + ... + mknod -m 666 /dev/usb/auer15 c 180 127 + +Future plans +============ +- Connection to ISDN4LINUX (the hisax interface) + +The maintainer of this driver is wolfgang@iksw-muees.de diff --git a/trunk/Documentation/usb/power-management.txt b/trunk/Documentation/usb/power-management.txt index 9d31140e3f5b..b2fc4d4a9917 100644 --- a/trunk/Documentation/usb/power-management.txt +++ b/trunk/Documentation/usb/power-management.txt @@ -436,12 +436,7 @@ post_reset; the USB core guarantees that this is true of internal suspend/resume events as well. If a driver wants to block all suspend/resume calls during some -critical section, it can simply acquire udev->pm_mutex. Note that -calls to resume may be triggered indirectly. Block IO due to memory -allocations can make the vm subsystem resume a device. Thus while -holding this lock you must not allocate memory with GFP_KERNEL or -GFP_NOFS. - +critical section, it can simply acquire udev->pm_mutex. 
Alternatively, if the critical section might call some of the usb_autopm_* routines, the driver can avoid deadlock by doing: diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 4c5e9fe0f7db..af6aa4e4b392 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -2928,12 +2928,6 @@ M: jirislaby@gmail.com L: linux-kernel@vger.kernel.org S: Maintained -MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER -P: Felipe Balbi -M: felipe.balbi@nokia.com -L: linux-usb@vger.kernel.org -S: Maintained - MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) P: Andrew Gallatin M: gallatin@myri.com @@ -3082,7 +3076,6 @@ M: horms@verge.net.au P: Julian Anastasov M: ja@ssi.bg L: netdev@vger.kernel.org -L: lvs-devel@vger.kernel.org S: Maintained NFS, SUNRPC, AND LOCKD CLIENTS @@ -4202,6 +4195,12 @@ M: oliver@neukum.name L: linux-usb@vger.kernel.org S: Maintained +USB AUERSWALD DRIVER +P: Wolfgang Muees +M: wolfgang@iksw-muees.de +L: linux-usb@vger.kernel.org +S: Maintained + USB BLOCK DRIVER (UB ub) P: Pete Zaitcev M: zaitcev@redhat.com diff --git a/trunk/arch/arm/mach-omap2/usb-tusb6010.c b/trunk/arch/arm/mach-omap2/usb-tusb6010.c index 10ef464d6be7..1607c941d95f 100644 --- a/trunk/arch/arm/mach-omap2/usb-tusb6010.c +++ b/trunk/arch/arm/mach-omap2/usb-tusb6010.c @@ -317,6 +317,7 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data, printk(error, 6, status); return -ENODEV; } + data->multipoint = 1; tusb_device.dev.platform_data = data; /* REVISIT let the driver know what DMA channels work */ diff --git a/trunk/arch/sparc/include/asm/irq_64.h b/trunk/arch/sparc/include/asm/irq_64.h index e3dd9303643d..3473e25231d9 100644 --- a/trunk/arch/sparc/include/asm/irq_64.h +++ b/trunk/arch/sparc/include/asm/irq_64.h @@ -93,8 +93,4 @@ static inline unsigned long get_softint(void) void __trigger_all_cpu_backtrace(void); #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() -extern void *hardirq_stack[NR_CPUS]; -extern void *softirq_stack[NR_CPUS]; -#define __ARCH_HAS_DO_SOFTIRQ - #endif diff --git a/trunk/arch/sparc/include/asm/of_device.h b/trunk/arch/sparc/include/asm/of_device.h index bba777a416d3..e5f5aedc2293 100644 --- a/trunk/arch/sparc/include/asm/of_device.h +++ b/trunk/arch/sparc/include/asm/of_device.h @@ -30,7 +30,8 @@ struct of_device extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name); extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size); -/* This is just here during the transition */ +/* These are just here during the transition */ +#include #include #endif /* __KERNEL__ */ diff --git a/trunk/arch/sparc64/kernel/irq.c b/trunk/arch/sparc64/kernel/irq.c index 9b6689d9d570..ba43d85e8dde 100644 --- a/trunk/arch/sparc64/kernel/irq.c +++ b/trunk/arch/sparc64/kernel/irq.c @@ -682,32 +682,10 @@ void ack_bad_irq(unsigned int virt_irq) ino, virt_irq); } -void *hardirq_stack[NR_CPUS]; -void *softirq_stack[NR_CPUS]; - -static __attribute__((always_inline)) void *set_hardirq_stack(void) -{ - void *orig_sp, *sp = hardirq_stack[smp_processor_id()]; - - __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp)); - if (orig_sp < sp || - orig_sp > (sp + THREAD_SIZE)) { - sp += THREAD_SIZE - 192 - STACK_BIAS; - __asm__ __volatile__("mov %0, %%sp" : : "r" (sp)); - } - - return orig_sp; -} -static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) -{ - __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); -} - void handler_irq(int irq, struct pt_regs *regs) { unsigned long pstate, bucket_pa; struct pt_regs 
*old_regs; - void *orig_sp; clear_softint(1 << irq); @@ -725,8 +703,6 @@ void handler_irq(int irq, struct pt_regs *regs) "i" (PSTATE_IE) : "memory"); - orig_sp = set_hardirq_stack(); - while (bucket_pa) { struct irq_desc *desc; unsigned long next_pa; @@ -743,38 +719,10 @@ void handler_irq(int irq, struct pt_regs *regs) bucket_pa = next_pa; } - restore_hardirq_stack(orig_sp); - irq_exit(); set_irq_regs(old_regs); } -void do_softirq(void) -{ - unsigned long flags; - - if (in_interrupt()) - return; - - local_irq_save(flags); - - if (local_softirq_pending()) { - void *orig_sp, *sp = softirq_stack[smp_processor_id()]; - - sp += THREAD_SIZE - 192 - STACK_BIAS; - - __asm__ __volatile__("mov %%sp, %0\n\t" - "mov %1, %%sp" - : "=&r" (orig_sp) - : "r" (sp)); - __do_softirq(); - __asm__ __volatile__("mov %0, %%sp" - : : "r" (orig_sp)); - } - - local_irq_restore(flags); -} - #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) { diff --git a/trunk/arch/sparc64/kernel/kstack.h b/trunk/arch/sparc64/kernel/kstack.h deleted file mode 100644 index 4248d969272f..000000000000 --- a/trunk/arch/sparc64/kernel/kstack.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef _KSTACK_H -#define _KSTACK_H - -#include -#include -#include -#include - -/* SP must be STACK_BIAS adjusted already. */ -static inline bool kstack_valid(struct thread_info *tp, unsigned long sp) -{ - unsigned long base = (unsigned long) tp; - - if (sp >= (base + sizeof(struct thread_info)) && - sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) - return true; - - if (hardirq_stack[tp->cpu]) { - base = (unsigned long) hardirq_stack[tp->cpu]; - if (sp >= base && - sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) - return true; - base = (unsigned long) softirq_stack[tp->cpu]; - if (sp >= base && - sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) - return true; - } - return false; -} - -/* Does "regs" point to a valid pt_regs trap frame? 
*/ -static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs) -{ - unsigned long base = (unsigned long) tp; - unsigned long addr = (unsigned long) regs; - - if (addr >= base && - addr <= (base + THREAD_SIZE - sizeof(*regs))) - goto check_magic; - - if (hardirq_stack[tp->cpu]) { - base = (unsigned long) hardirq_stack[tp->cpu]; - if (addr >= base && - addr <= (base + THREAD_SIZE - sizeof(*regs))) - goto check_magic; - base = (unsigned long) softirq_stack[tp->cpu]; - if (addr >= base && - addr <= (base + THREAD_SIZE - sizeof(*regs))) - goto check_magic; - } - return false; - -check_magic: - if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) - return true; - return false; - -} - -#endif /* _KSTACK_H */ diff --git a/trunk/arch/sparc64/kernel/process.c b/trunk/arch/sparc64/kernel/process.c index 15f4178592e7..7f5debdc5fed 100644 --- a/trunk/arch/sparc64/kernel/process.c +++ b/trunk/arch/sparc64/kernel/process.c @@ -52,8 +52,6 @@ #include #include -#include "kstack.h" - static void sparc64_yield(int cpu) { if (tlb_type != hypervisor) @@ -237,6 +235,19 @@ void show_regs(struct pt_regs *regs) struct global_reg_snapshot global_reg_snapshot[NR_CPUS]; static DEFINE_SPINLOCK(global_reg_snapshot_lock); +static bool kstack_valid(struct thread_info *tp, struct reg_window *rw) +{ + unsigned long thread_base, fp; + + thread_base = (unsigned long) tp; + fp = (unsigned long) rw; + + if (fp < (thread_base + sizeof(struct thread_info)) || + fp >= (thread_base + THREAD_SIZE)) + return false; + return true; +} + static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, int this_cpu) { @@ -253,11 +264,11 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, rw = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); - if (kstack_valid(tp, (unsigned long) rw)) { + if (kstack_valid(tp, rw)) { global_reg_snapshot[this_cpu].i7 = rw->ins[7]; rw = (struct reg_window *) (rw->ins[6] + STACK_BIAS); - if (kstack_valid(tp, (unsigned long) rw)) + if (kstack_valid(tp, rw)) global_reg_snapshot[this_cpu].rpc = rw->ins[7]; } } else { @@ -817,7 +828,7 @@ asmlinkage int sparc_execve(struct pt_regs *regs) unsigned long get_wchan(struct task_struct *task) { unsigned long pc, fp, bias = 0; - struct thread_info *tp; + unsigned long thread_info_base; struct reg_window *rw; unsigned long ret = 0; int count = 0; @@ -826,12 +837,14 @@ unsigned long get_wchan(struct task_struct *task) task->state == TASK_RUNNING) goto out; - tp = task_thread_info(task); + thread_info_base = (unsigned long) task_stack_page(task); bias = STACK_BIAS; fp = task_thread_info(task)->ksp + bias; do { - if (!kstack_valid(tp, fp)) + /* Bogus frame pointer? 
*/ + if (fp < (thread_info_base + sizeof(struct thread_info)) || + fp >= (thread_info_base + THREAD_SIZE)) break; rw = (struct reg_window *) fp; pc = rw->ins[7]; diff --git a/trunk/arch/sparc64/kernel/smp.c b/trunk/arch/sparc64/kernel/smp.c index 743ccad61c60..27b81775a4de 100644 --- a/trunk/arch/sparc64/kernel/smp.c +++ b/trunk/arch/sparc64/kernel/smp.c @@ -858,7 +858,9 @@ void smp_tsb_sync(struct mm_struct *mm) extern unsigned long xcall_flush_tlb_mm; extern unsigned long xcall_flush_tlb_pending; extern unsigned long xcall_flush_tlb_kernel_range; +#ifdef CONFIG_MAGIC_SYSRQ extern unsigned long xcall_fetch_glob_regs; +#endif extern unsigned long xcall_receive_signal; extern unsigned long xcall_new_mmu_context_version; #ifdef CONFIG_KGDB @@ -1003,10 +1005,12 @@ void kgdb_roundup_cpus(unsigned long flags) } #endif +#ifdef CONFIG_MAGIC_SYSRQ void smp_fetch_global_regs(void) { smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); } +#endif /* We know that the window frames of the user have been flushed * to the stack before we get here because all callers of us diff --git a/trunk/arch/sparc64/kernel/stacktrace.c b/trunk/arch/sparc64/kernel/stacktrace.c index 4e21d4a57d3b..e9d7f0660f2e 100644 --- a/trunk/arch/sparc64/kernel/stacktrace.c +++ b/trunk/arch/sparc64/kernel/stacktrace.c @@ -5,12 +5,10 @@ #include #include -#include "kstack.h" - void save_stack_trace(struct stack_trace *trace) { + unsigned long ksp, fp, thread_base; struct thread_info *tp = task_thread_info(current); - unsigned long ksp, fp; stack_trace_flush(); @@ -20,18 +18,23 @@ void save_stack_trace(struct stack_trace *trace) ); fp = ksp + STACK_BIAS; + thread_base = (unsigned long) tp; do { struct sparc_stackf *sf; struct pt_regs *regs; unsigned long pc; - if (!kstack_valid(tp, fp)) + /* Bogus frame pointer? */ + if (fp < (thread_base + sizeof(struct thread_info)) || + fp > (thread_base + THREAD_SIZE - sizeof(struct sparc_stackf))) break; sf = (struct sparc_stackf *) fp; regs = (struct pt_regs *) (sf + 1); - if (kstack_is_trap_frame(tp, regs)) { + if (((unsigned long)regs <= + (thread_base + THREAD_SIZE - sizeof(*regs))) && + (regs->magic & ~0x1ff) == PT_REGS_MAGIC) { if (!(regs->tstate & TSTATE_PRIV)) break; pc = regs->tpc; diff --git a/trunk/arch/sparc64/kernel/traps.c b/trunk/arch/sparc64/kernel/traps.c index 3d924121c796..404e8561e2d0 100644 --- a/trunk/arch/sparc64/kernel/traps.c +++ b/trunk/arch/sparc64/kernel/traps.c @@ -39,7 +39,6 @@ #include #include "entry.h" -#include "kstack.h" /* When an irrecoverable trap occurs at tl > 0, the trap entry * code logs the trap state registers at every level in the trap @@ -2116,12 +2115,14 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) struct pt_regs *regs; unsigned long pc; - if (!kstack_valid(tp, fp)) + /* Bogus frame pointer? 
*/ + if (fp < (thread_base + sizeof(struct thread_info)) || + fp >= (thread_base + THREAD_SIZE)) break; sf = (struct sparc_stackf *) fp; regs = (struct pt_regs *) (sf + 1); - if (kstack_is_trap_frame(tp, regs)) { + if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) { if (!(regs->tstate & TSTATE_PRIV)) break; pc = regs->tpc; diff --git a/trunk/arch/sparc64/lib/mcount.S b/trunk/arch/sparc64/lib/mcount.S index fad90ddb3a28..7735a7a60533 100644 --- a/trunk/arch/sparc64/lib/mcount.S +++ b/trunk/arch/sparc64/lib/mcount.S @@ -48,45 +48,12 @@ mcount: sub %g3, STACK_BIAS, %g3 cmp %sp, %g3 bg,pt %xcc, 1f - nop - lduh [%g6 + TI_CPU], %g1 - sethi %hi(hardirq_stack), %g3 - or %g3, %lo(hardirq_stack), %g3 - sllx %g1, 3, %g1 - ldx [%g3 + %g1], %g7 - sub %g7, STACK_BIAS, %g7 - cmp %sp, %g7 - bleu,pt %xcc, 2f - sethi %hi(THREAD_SIZE), %g3 - add %g7, %g3, %g7 - cmp %sp, %g7 - blu,pn %xcc, 1f -2: sethi %hi(softirq_stack), %g3 - or %g3, %lo(softirq_stack), %g3 - ldx [%g3 + %g1], %g7 - cmp %sp, %g7 - bleu,pt %xcc, 2f - sethi %hi(THREAD_SIZE), %g3 - add %g7, %g3, %g7 - cmp %sp, %g7 - blu,pn %xcc, 1f - nop - /* If we are already on ovstack, don't hop onto it - * again, we are already trying to output the stack overflow - * message. - */ + sethi %hi(panicstring), %g3 sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough or %g7, %lo(ovstack), %g7 - add %g7, OVSTACKSIZE, %g3 - sub %g3, STACK_BIAS + 192, %g3 + add %g7, OVSTACKSIZE, %g7 sub %g7, STACK_BIAS, %g7 - cmp %sp, %g7 - blu,pn %xcc, 2f - cmp %sp, %g3 - bleu,pn %xcc, 1f - nop -2: mov %g3, %sp - sethi %hi(panicstring), %g3 + mov %g7, %sp call prom_printf or %g3, %lo(panicstring), %o0 call prom_halt diff --git a/trunk/arch/sparc64/mm/init.c b/trunk/arch/sparc64/mm/init.c index 217de3ea29e8..4e821b3ecb03 100644 --- a/trunk/arch/sparc64/mm/init.c +++ b/trunk/arch/sparc64/mm/init.c @@ -49,7 +49,6 @@ #include #include #include -#include #define MAX_PHYS_ADDRESS (1UL << 42UL) #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) @@ -1772,16 +1771,6 @@ void __init paging_init(void) if (tlb_type == hypervisor) sun4v_mdesc_init(); - /* Once the OF device tree and MDESC have been setup, we know - * the list of possible cpus. Therefore we can allocate the - * IRQ stacks. - */ - for_each_possible_cpu(i) { - /* XXX Use node local allocations... XXX */ - softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); - hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); - } - /* Setup bootmem... 
*/ last_valid_pfn = end_pfn = bootmem_init(phys_base); diff --git a/trunk/arch/sparc64/mm/ultra.S b/trunk/arch/sparc64/mm/ultra.S index 86773e89dc1b..ff1dc44d363e 100644 --- a/trunk/arch/sparc64/mm/ultra.S +++ b/trunk/arch/sparc64/mm/ultra.S @@ -480,6 +480,7 @@ xcall_sync_tick: b rtrap_xcall ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 +#ifdef CONFIG_MAGIC_SYSRQ .globl xcall_fetch_glob_regs xcall_fetch_glob_regs: sethi %hi(global_reg_snapshot), %g1 @@ -510,6 +511,7 @@ xcall_fetch_glob_regs: membar #StoreStore stx %g3, [%g1 + GR_SNAP_THREAD] retry +#endif /* CONFIG_MAGIC_SYSRQ */ #ifdef DCACHE_ALIASING_POSSIBLE .align 32 diff --git a/trunk/crypto/digest.c b/trunk/crypto/digest.c index 5d3f1303da98..ac0919460d14 100644 --- a/trunk/crypto/digest.c +++ b/trunk/crypto/digest.c @@ -225,7 +225,7 @@ int crypto_init_digest_ops_async(struct crypto_tfm *tfm) struct ahash_tfm *crt = &tfm->crt_ahash; struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; - if (dalg->dia_digestsize > PAGE_SIZE / 8) + if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm)) return -EINVAL; crt->init = digest_async_init; diff --git a/trunk/crypto/tcrypt.c b/trunk/crypto/tcrypt.c index 66368022e0bf..59821a22d752 100644 --- a/trunk/crypto/tcrypt.c +++ b/trunk/crypto/tcrypt.c @@ -481,31 +481,21 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, for (k = 0, temp = 0; k < template[i].np; k++) { printk(KERN_INFO "page %u\n", k); - q = &xbuf[IDX[k]]; - - n = template[i].tap[k]; - if (k == template[i].np - 1) - n += enc ? authsize : -authsize; - hexdump(q, n); + q = &axbuf[IDX[k]]; + hexdump(q, template[i].tap[k]); printk(KERN_INFO "%s\n", - memcmp(q, template[i].result + temp, n) ? + memcmp(q, template[i].result + temp, + template[i].tap[k] - + (k < template[i].np - 1 || enc ? + 0 : authsize)) ? "fail" : "pass"); - q += n; - if (k == template[i].np - 1 && !enc) { - if (memcmp(q, template[i].input + - temp + n, authsize)) - n = authsize; - else - n = 0; - } else { - for (n = 0; q[n]; n++) - ; - } + for (n = 0; q[template[i].tap[k] + n]; n++) + ; if (n) { printk("Result buffer corruption %u " "bytes:\n", n); - hexdump(q, n); + hexdump(&q[template[i].tap[k]], n); } temp += template[i].tap[k]; diff --git a/trunk/drivers/Makefile b/trunk/drivers/Makefile index 2735bde73475..a280ab3d0833 100644 --- a/trunk/drivers/Makefile +++ b/trunk/drivers/Makefile @@ -57,7 +57,6 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/ obj-$(CONFIG_PARIDE) += block/paride/ obj-$(CONFIG_TC) += tc/ obj-$(CONFIG_USB) += usb/ -obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/ obj-$(CONFIG_PCI) += usb/ obj-$(CONFIG_USB_GADGET) += usb/gadget/ obj-$(CONFIG_SERIO) += input/serio/ diff --git a/trunk/drivers/bluetooth/Kconfig b/trunk/drivers/bluetooth/Kconfig index a235ca787465..7cb4029a5375 100644 --- a/trunk/drivers/bluetooth/Kconfig +++ b/trunk/drivers/bluetooth/Kconfig @@ -3,8 +3,8 @@ menu "Bluetooth device drivers" depends on BT config BT_HCIUSB - tristate "HCI USB driver" - depends on USB + tristate "HCI USB driver (old version)" + depends on USB && BT_HCIBTUSB=n help Bluetooth HCI USB driver. This driver is required if you want to use Bluetooth devices with @@ -23,15 +23,13 @@ config BT_HCIUSB_SCO Say Y here to compile support for SCO over HCI USB. config BT_HCIBTUSB - tristate "HCI USB driver (alternate version)" - depends on USB && EXPERIMENTAL && BT_HCIUSB=n + tristate "HCI USB driver" + depends on USB help Bluetooth HCI USB driver. This driver is required if you want to use Bluetooth devices with USB interface. 
- This driver is still experimental and has no SCO support. - Say Y here to compile support for Bluetooth USB devices into the kernel or say M to compile it as module (btusb). diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index 95ae9ba5661e..6a010681ecf3 100644 --- a/trunk/drivers/bluetooth/btusb.c +++ b/trunk/drivers/bluetooth/btusb.c @@ -2,7 +2,7 @@ * * Generic Bluetooth USB driver * - * Copyright (C) 2005-2007 Marcel Holtmann + * Copyright (C) 2005-2008 Marcel Holtmann * * * This program is free software; you can redistribute it and/or modify @@ -41,7 +41,7 @@ #define BT_DBG(D...) #endif -#define VERSION "0.2" +#define VERSION "0.3" static int ignore_dga; static int ignore_csr; @@ -160,12 +160,16 @@ static struct usb_device_id blacklist_table[] = { { } /* Terminating entry */ }; +#define BTUSB_MAX_ISOC_FRAMES 10 + #define BTUSB_INTR_RUNNING 0 #define BTUSB_BULK_RUNNING 1 +#define BTUSB_ISOC_RUNNING 2 struct btusb_data { struct hci_dev *hdev; struct usb_device *udev; + struct usb_interface *isoc; spinlock_t lock; @@ -176,10 +180,15 @@ struct btusb_data { struct usb_anchor tx_anchor; struct usb_anchor intr_anchor; struct usb_anchor bulk_anchor; + struct usb_anchor isoc_anchor; struct usb_endpoint_descriptor *intr_ep; struct usb_endpoint_descriptor *bulk_tx_ep; struct usb_endpoint_descriptor *bulk_rx_ep; + struct usb_endpoint_descriptor *isoc_tx_ep; + struct usb_endpoint_descriptor *isoc_rx_ep; + + int isoc_altsetting; }; static void btusb_intr_complete(struct urb *urb) @@ -195,6 +204,8 @@ static void btusb_intr_complete(struct urb *urb) return; if (urb->status == 0) { + hdev->stat.byte_rx += urb->actual_length; + if (hci_recv_fragment(hdev, HCI_EVENT_PKT, urb->transfer_buffer, urb->actual_length) < 0) { @@ -216,7 +227,7 @@ static void btusb_intr_complete(struct urb *urb) } } -static inline int btusb_submit_intr_urb(struct hci_dev *hdev) +static int btusb_submit_intr_urb(struct hci_dev *hdev) { struct btusb_data *data = hdev->driver_data; struct urb *urb; @@ -226,6 +237,9 @@ static inline int btusb_submit_intr_urb(struct hci_dev *hdev) BT_DBG("%s", hdev->name); + if (!data->intr_ep) + return -ENODEV; + urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; @@ -274,6 +288,8 @@ static void btusb_bulk_complete(struct urb *urb) return; if (urb->status == 0) { + hdev->stat.byte_rx += urb->actual_length; + if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, urb->transfer_buffer, urb->actual_length) < 0) { @@ -295,7 +311,7 @@ static void btusb_bulk_complete(struct urb *urb) } } -static inline int btusb_submit_bulk_urb(struct hci_dev *hdev) +static int btusb_submit_bulk_urb(struct hci_dev *hdev) { struct btusb_data *data = hdev->driver_data; struct urb *urb; @@ -305,6 +321,9 @@ static inline int btusb_submit_bulk_urb(struct hci_dev *hdev) BT_DBG("%s", hdev->name); + if (!data->bulk_rx_ep) + return -ENODEV; + urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; @@ -339,6 +358,127 @@ static inline int btusb_submit_bulk_urb(struct hci_dev *hdev) return err; } +static void btusb_isoc_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hdev->driver_data; + int i, err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + for (i = 0; i < urb->number_of_packets; i++) { + unsigned int offset = urb->iso_frame_desc[i].offset; + unsigned int length = urb->iso_frame_desc[i].actual_length; + + if 
(urb->iso_frame_desc[i].status) + continue; + + hdev->stat.byte_rx += length; + + if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, + urb->transfer_buffer + offset, + length) < 0) { + BT_ERR("%s corrupted SCO packet", hdev->name); + hdev->stat.err_rx++; + } + } + } + + if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) + return; + + usb_anchor_urb(urb, &data->isoc_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + BT_ERR("%s urb %p failed to resubmit (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } +} + +static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu) +{ + int i, offset = 0; + + BT_DBG("len %d mtu %d", len, mtu); + + for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; + i++, offset += mtu, len -= mtu) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = mtu; + } + + if (len && i < BTUSB_MAX_ISOC_FRAMES) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = len; + i++; + } + + urb->number_of_packets = i; +} + +static int btusb_submit_isoc_urb(struct hci_dev *hdev) +{ + struct btusb_data *data = hdev->driver_data; + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size; + + BT_DBG("%s", hdev->name); + + if (!data->isoc_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * + BTUSB_MAX_ISOC_FRAMES; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); + + urb->dev = data->udev; + urb->pipe = pipe; + urb->context = hdev; + urb->complete = btusb_isoc_complete; + urb->interval = data->isoc_rx_ep->bInterval; + + urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; + urb->transfer_buffer = buf; + urb->transfer_buffer_length = size; + + __fill_isoc_descriptor(urb, size, + le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); + + usb_anchor_urb(urb, &data->isoc_anchor); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + kfree(buf); + } + + usb_free_urb(urb); + + return err; +} + static void btusb_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; @@ -392,6 +532,9 @@ static int btusb_close(struct hci_dev *hdev) if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->intr_anchor); + clear_bit(BTUSB_BULK_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->bulk_anchor); @@ -453,6 +596,9 @@ static int btusb_send_frame(struct sk_buff *skb) break; case HCI_ACLDATA_PKT: + if (!data->bulk_tx_ep || hdev->conn_hash.acl_num < 1) + return -ENODEV; + urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; @@ -467,9 +613,31 @@ static int btusb_send_frame(struct sk_buff *skb) break; case HCI_SCODATA_PKT: + if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1) + return -ENODEV; + + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + pipe = usb_sndisocpipe(data->udev, + data->isoc_tx_ep->bEndpointAddress); + + urb->dev = data->udev; + urb->pipe = pipe; + urb->context = skb; + urb->complete = btusb_tx_complete; + urb->interval = data->isoc_tx_ep->bInterval; + + urb->transfer_flags = URB_ISO_ASAP; + urb->transfer_buffer = skb->data; + urb->transfer_buffer_length = skb->len; + + __fill_isoc_descriptor(urb, skb->len, + 
le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); + hdev->stat.sco_tx++; - kfree_skb(skb); - return 0; + break; default: return -EILSEQ; @@ -508,22 +676,86 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt) schedule_work(&data->work); } +static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting) +{ + struct btusb_data *data = hdev->driver_data; + struct usb_interface *intf = data->isoc; + struct usb_endpoint_descriptor *ep_desc; + int i, err; + + if (!data->isoc) + return -ENODEV; + + err = usb_set_interface(data->udev, 1, altsetting); + if (err < 0) { + BT_ERR("%s setting interface failed (%d)", hdev->name, -err); + return err; + } + + data->isoc_altsetting = altsetting; + + data->isoc_tx_ep = NULL; + data->isoc_rx_ep = NULL; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { + data->isoc_tx_ep = ep_desc; + continue; + } + + if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { + data->isoc_rx_ep = ep_desc; + continue; + } + } + + if (!data->isoc_tx_ep || !data->isoc_rx_ep) { + BT_ERR("%s invalid SCO descriptors", hdev->name); + return -ENODEV; + } + + return 0; +} + static void btusb_work(struct work_struct *work) { struct btusb_data *data = container_of(work, struct btusb_data, work); struct hci_dev *hdev = data->hdev; - if (hdev->conn_hash.acl_num == 0) { + if (hdev->conn_hash.acl_num > 0) { + if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) { + if (btusb_submit_bulk_urb(hdev) < 0) + clear_bit(BTUSB_BULK_RUNNING, &data->flags); + else + btusb_submit_bulk_urb(hdev); + } + } else { clear_bit(BTUSB_BULK_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->bulk_anchor); - return; } - if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) { - if (btusb_submit_bulk_urb(hdev) < 0) - clear_bit(BTUSB_BULK_RUNNING, &data->flags); - else - btusb_submit_bulk_urb(hdev); + if (hdev->conn_hash.sco_num > 0) { + if (data->isoc_altsetting != 2) { + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->isoc_anchor); + + if (__set_isoc_interface(hdev, 2) < 0) + return; + } + + if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { + if (btusb_submit_isoc_urb(hdev) < 0) + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + else + btusb_submit_isoc_urb(hdev); + } + } else { + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->isoc_anchor); + + __set_isoc_interface(hdev, 0); } } @@ -597,6 +829,7 @@ static int btusb_probe(struct usb_interface *intf, init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->intr_anchor); init_usb_anchor(&data->bulk_anchor); + init_usb_anchor(&data->isoc_anchor); hdev = hci_alloc_dev(); if (!hdev) { @@ -620,6 +853,9 @@ static int btusb_probe(struct usb_interface *intf, hdev->owner = THIS_MODULE; + /* interface numbers are hardcoded in the spec */ + data->isoc = usb_ifnum_to_if(data->udev, 1); + if (reset || id->driver_info & BTUSB_RESET) set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); @@ -628,11 +864,16 @@ static int btusb_probe(struct usb_interface *intf, set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); } + if (id->driver_info & BTUSB_BROKEN_ISOC) + data->isoc = NULL; + if (id->driver_info & BTUSB_SNIFFER) { - struct usb_device *udev = interface_to_usbdev(intf); + struct usb_device *udev = data->udev; if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + + data->isoc = NULL; } if 
(id->driver_info & BTUSB_BCM92035) { @@ -646,6 +887,16 @@ static int btusb_probe(struct usb_interface *intf, } } + if (data->isoc) { + err = usb_driver_claim_interface(&btusb_driver, + data->isoc, NULL); + if (err < 0) { + hci_free_dev(hdev); + kfree(data); + return err; + } + } + err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); @@ -670,6 +921,9 @@ static void btusb_disconnect(struct usb_interface *intf) hdev = data->hdev; + if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); + usb_set_intfdata(intf, NULL); hci_unregister_dev(hdev); diff --git a/trunk/drivers/char/hw_random/via-rng.c b/trunk/drivers/char/hw_random/via-rng.c index 128202e18fc9..f7feae4ebb5e 100644 --- a/trunk/drivers/char/hw_random/via-rng.c +++ b/trunk/drivers/char/hw_random/via-rng.c @@ -31,7 +31,6 @@ #include #include #include -#include #define PFX KBUILD_MODNAME ": " @@ -68,23 +67,16 @@ enum { * Another possible performance boost may come from simply buffering * until we have 4 bytes, thus returning a u32 at a time, * instead of the current u8-at-a-time. - * - * Padlock instructions can generate a spurious DNA fault, so - * we have to call them in the context of irq_ts_save/restore() */ static inline u32 xstore(u32 *addr, u32 edx_in) { u32 eax_out; - int ts_state; - - ts_state = irq_ts_save(); asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" :"=m"(*addr), "=a"(eax_out) :"D"(addr), "d"(edx_in)); - irq_ts_restore(ts_state); return eax_out; } diff --git a/trunk/drivers/char/random.c b/trunk/drivers/char/random.c index 1838aa3d24fe..e0d0e371909c 100644 --- a/trunk/drivers/char/random.c +++ b/trunk/drivers/char/random.c @@ -1571,7 +1571,6 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) return half_md4_transform(hash, keyptr->secret); } -EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, diff --git a/trunk/drivers/crypto/padlock-aes.c b/trunk/drivers/crypto/padlock-aes.c index bf2917d197a0..54a2a166e566 100644 --- a/trunk/drivers/crypto/padlock-aes.c +++ b/trunk/drivers/crypto/padlock-aes.c @@ -16,7 +16,6 @@ #include #include #include -#include #include "padlock.h" /* Control word. */ @@ -142,12 +141,6 @@ static inline void padlock_reset_key(void) asm volatile ("pushfl; popfl"); } -/* - * While the padlock instructions don't use FP/SSE registers, they - * generate a spurious DNA fault when cr0.ts is '1'. 
These instructions - * should be used only inside the irq_ts_save/restore() context - */ - static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, void *control_word) { @@ -212,23 +205,15 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); - int ts_state; padlock_reset_key(); - - ts_state = irq_ts_save(); aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); - irq_ts_restore(ts_state); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); - int ts_state; padlock_reset_key(); - - ts_state = irq_ts_save(); aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); - irq_ts_restore(ts_state); } static struct crypto_alg aes_alg = { @@ -259,14 +244,12 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; - int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); - ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, ctx->E, &ctx->cword.encrypt, @@ -274,7 +257,6 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - irq_ts_restore(ts_state); return err; } @@ -286,14 +268,12 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; - int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); - ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, ctx->D, &ctx->cword.decrypt, @@ -301,7 +281,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - irq_ts_restore(ts_state); + return err; } @@ -334,14 +314,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; - int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); - ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, ctx->E, @@ -351,7 +329,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - irq_ts_restore(ts_state); return err; } @@ -363,14 +340,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; - int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); - ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, ctx->D, walk.iv, &ctx->cword.decrypt, @@ -379,7 +354,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_done(desc, &walk, nbytes); } - irq_ts_restore(ts_state); return err; } diff --git a/trunk/drivers/crypto/padlock-sha.c b/trunk/drivers/crypto/padlock-sha.c index a7fbadebf623..40d5680fa013 100644 --- a/trunk/drivers/crypto/padlock-sha.c +++ b/trunk/drivers/crypto/padlock-sha.c @@ -22,7 +22,6 @@ 
#include #include #include -#include #include "padlock.h" #define SHA1_DEFAULT_FALLBACK "sha1-generic" @@ -103,7 +102,6 @@ static void padlock_do_sha1(const char *in, char *out, int count) * PadLock microcode needs it that big. */ char buf[128+16]; char *result = NEAREST_ALIGNED(buf); - int ts_state; ((uint32_t *)result)[0] = SHA1_H0; ((uint32_t *)result)[1] = SHA1_H1; @@ -111,12 +109,9 @@ static void padlock_do_sha1(const char *in, char *out, int count) ((uint32_t *)result)[3] = SHA1_H3; ((uint32_t *)result)[4] = SHA1_H4; - /* prevent taking the spurious DNA fault with padlock. */ - ts_state = irq_ts_save(); asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0)); - irq_ts_restore(ts_state); padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); } @@ -128,7 +123,6 @@ static void padlock_do_sha256(const char *in, char *out, int count) * PadLock microcode needs it that big. */ char buf[128+16]; char *result = NEAREST_ALIGNED(buf); - int ts_state; ((uint32_t *)result)[0] = SHA256_H0; ((uint32_t *)result)[1] = SHA256_H1; @@ -139,12 +133,9 @@ static void padlock_do_sha256(const char *in, char *out, int count) ((uint32_t *)result)[6] = SHA256_H6; ((uint32_t *)result)[7] = SHA256_H7; - /* prevent taking the spurious DNA fault with padlock. */ - ts_state = irq_ts_save(); asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0)); - irq_ts_restore(ts_state); padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); } diff --git a/trunk/drivers/crypto/talitos.c b/trunk/drivers/crypto/talitos.c index ee827a7f7c6a..681c15f42083 100644 --- a/trunk/drivers/crypto/talitos.c +++ b/trunk/drivers/crypto/talitos.c @@ -96,9 +96,6 @@ struct talitos_private { unsigned int exec_units; unsigned int desc_types; - /* SEC Compatibility info */ - unsigned long features; - /* next channel to be assigned next incoming descriptor */ atomic_t last_chan; @@ -136,9 +133,6 @@ struct talitos_private { struct hwrng rng; }; -/* .features flag */ -#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 - /* * map virtual single (contiguous) pointer to h/w descriptor pointer */ @@ -791,7 +785,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, /* copy the generated ICV to dst */ if (edesc->dma_len) { icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2]; + edesc->dst_nents + 1]; sg = sg_last(areq->dst, edesc->dst_nents); memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, icvdata, ctx->authsize); @@ -820,7 +814,7 @@ static void ipsec_esp_decrypt_done(struct device *dev, /* auth check */ if (edesc->dma_len) icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2]; + edesc->dst_nents + 1]; else icvdata = &edesc->link_tbl[0]; @@ -927,30 +921,10 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, &edesc->link_tbl[0]); if (sg_count > 1) { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[sg_count-1]; - struct scatterlist *sg; - struct talitos_private *priv = dev_get_drvdata(dev); - desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); - /* If necessary for this SEC revision, - * add a link table entry for ICV. 
- */ - if ((priv->features & - TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) && - (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { - link_tbl_ptr->j_extent = 0; - link_tbl_ptr++; - link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; - link_tbl_ptr->len = cpu_to_be16(authsize); - sg = sg_last(areq->src, edesc->src_nents ? : 1); - link_tbl_ptr->ptr = cpu_to_be32( - (char *)sg_dma_address(sg) - + sg->length - authsize); - } } else { /* Only one segment now, so no link tbl needed */ desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); @@ -970,11 +944,12 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); } else { struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[edesc->src_nents + 1]; + &edesc->link_tbl[edesc->src_nents]; + struct scatterlist *sg; desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) edesc->dma_link_tbl + - edesc->src_nents + 1); + edesc->src_nents); if (areq->src == areq->dst) { memcpy(link_tbl_ptr, &edesc->link_tbl[0], edesc->src_nents * sizeof(struct talitos_ptr)); @@ -982,10 +957,14 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, link_tbl_ptr); } - /* Add an entry to the link table for ICV data */ link_tbl_ptr += sg_count - 1; + + /* handle case where sg_last contains the ICV exclusively */ + sg = sg_last(areq->dst, edesc->dst_nents); + if (sg->length == ctx->authsize) + link_tbl_ptr--; + link_tbl_ptr->j_extent = 0; - sg_count++; link_tbl_ptr++; link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; link_tbl_ptr->len = cpu_to_be16(authsize); @@ -994,7 +973,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) edesc->dma_link_tbl + edesc->src_nents + - edesc->dst_nents + 2); + edesc->dst_nents + 1); desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, @@ -1061,12 +1040,12 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, /* * allocate space for base edesc plus the link tables, - * allowing for two separate entries for ICV and generated ICV (+ 2), + * allowing for a separate entry for the generated ICV (+ 1), * and the ICV data itself */ alloc_len = sizeof(struct ipsec_esp_edesc); if (src_nents || dst_nents) { - dma_len = (src_nents + dst_nents + 2) * + dma_len = (src_nents + dst_nents + 1) * sizeof(struct talitos_ptr) + ctx->authsize; alloc_len += dma_len; } else { @@ -1125,7 +1104,7 @@ static int aead_authenc_decrypt(struct aead_request *req) /* stash incoming ICV for later cmp with ICV generated by the h/w */ if (edesc->dma_len) icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2]; + edesc->dst_nents + 1]; else icvdata = &edesc->link_tbl[0]; @@ -1501,9 +1480,6 @@ static int talitos_probe(struct of_device *ofdev, goto err_out; } - if (of_device_is_compatible(np, "fsl,sec3.0")) - priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; - priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, GFP_KERNEL); priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, diff --git a/trunk/drivers/i2c/chips/isp1301_omap.c b/trunk/drivers/i2c/chips/isp1301_omap.c index 4655b794ebe3..18355ae2155d 100644 --- a/trunk/drivers/i2c/chips/isp1301_omap.c +++ b/trunk/drivers/i2c/chips/isp1301_omap.c @@ -1593,7 +1593,7 @@ static int isp1301_probe(struct i2c_adapter *bus, int address, int kind) if 
(machine_is_omap_h2()) { /* full speed signaling by default */ isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, - MC1_SPEED); + MC1_SPEED_REG); isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, MC2_SPD_SUSP_CTRL); diff --git a/trunk/drivers/input/serio/i8042-sparcio.h b/trunk/drivers/input/serio/i8042-sparcio.h index 692a79ec2a22..66bafe308b0c 100644 --- a/trunk/drivers/input/serio/i8042-sparcio.h +++ b/trunk/drivers/input/serio/i8042-sparcio.h @@ -1,11 +1,10 @@ #ifndef _I8042_SPARCIO_H #define _I8042_SPARCIO_H -#include - #include #include #include +#include static int i8042_kbd_irq = -1; static int i8042_aux_irq = -1; diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig index a5c141cecd4e..4b4cb2bf4f11 100644 --- a/trunk/drivers/net/Kconfig +++ b/trunk/drivers/net/Kconfig @@ -1172,7 +1172,7 @@ config ETH16I config NE2000 tristate "NE2000/NE1000 support" - depends on NET_ISA || (Q40 && m) || M32R || MACH_TX49XX + depends on NET_ISA || (Q40 && m) || M32R || TOSHIBA_RBTX4927 || TOSHIBA_RBTX4938 select CRC32 ---help--- If you have a network (Ethernet) card of this type, say Y and read diff --git a/trunk/drivers/net/acenic.c b/trunk/drivers/net/acenic.c index 66de80b64b92..e4483de84e7f 100644 --- a/trunk/drivers/net/acenic.c +++ b/trunk/drivers/net/acenic.c @@ -52,6 +52,7 @@ #include #include +#include #include #include #include diff --git a/trunk/drivers/net/arm/ixp4xx_eth.c b/trunk/drivers/net/arm/ixp4xx_eth.c index e2d702b8b2e4..020771bfb603 100644 --- a/trunk/drivers/net/arm/ixp4xx_eth.c +++ b/trunk/drivers/net/arm/ixp4xx_eth.c @@ -551,7 +551,7 @@ static int eth_poll(struct napi_struct *napi, int budget) if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { phys = dma_map_single(&dev->dev, skb->data, RX_BUFF_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&dev->dev, phys)) { + if (dma_mapping_error(phys)) { dev_kfree_skb(skb); skb = NULL; } @@ -698,7 +698,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) #endif phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); - if (dma_mapping_error(&dev->dev, phys)) { + if (dma_mapping_error(phys)) { #ifdef __ARMEB__ dev_kfree_skb(skb); #else @@ -883,7 +883,7 @@ static int init_queues(struct port *port) desc->buf_len = MAX_MRU; desc->data = dma_map_single(&port->netdev->dev, data, RX_BUFF_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&port->netdev->dev, desc->data)) { + if (dma_mapping_error(desc->data)) { free_buffer(buff); return -EIO; } diff --git a/trunk/drivers/net/atl1e/atl1e_ethtool.c b/trunk/drivers/net/atl1e/atl1e_ethtool.c index 619c6583e1aa..cdc3b85b10b9 100644 --- a/trunk/drivers/net/atl1e/atl1e_ethtool.c +++ b/trunk/drivers/net/atl1e/atl1e_ethtool.c @@ -355,7 +355,7 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct atl1e_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | - WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) + WAKE_MCAST | WAKE_BCAST | WAKE_MCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; diff --git a/trunk/drivers/net/au1000_eth.c b/trunk/drivers/net/au1000_eth.c index 5ee1b0557a02..cb8be490e5ae 100644 --- a/trunk/drivers/net/au1000_eth.c +++ b/trunk/drivers/net/au1000_eth.c @@ -807,7 +807,7 @@ static struct net_device * au1000_probe(int port_num) static int au1000_init(struct net_device *dev) { struct au1000_private *aup = (struct au1000_private *) dev->priv; - unsigned long flags; + u32 flags; int i; u32 control; diff --git a/trunk/drivers/net/ax88796.c 
b/trunk/drivers/net/ax88796.c index a886a4b9f7e5..0b4adf4a0f7d 100644 --- a/trunk/drivers/net/ax88796.c +++ b/trunk/drivers/net/ax88796.c @@ -554,7 +554,7 @@ static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) spin_lock_irqsave(&ax->mii_lock, flags); mii_ethtool_gset(&ax->mii, cmd); - spin_unlock_irqrestore(&ax->mii_lock, flags); + spin_lock_irqsave(&ax->mii_lock, flags); return 0; } @@ -567,7 +567,7 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) spin_lock_irqsave(&ax->mii_lock, flags); rc = mii_ethtool_sset(&ax->mii, cmd); - spin_unlock_irqrestore(&ax->mii_lock, flags); + spin_lock_irqsave(&ax->mii_lock, flags); return rc; } diff --git a/trunk/drivers/net/bnx2.c b/trunk/drivers/net/bnx2.c index 2486a656f12d..5ebde67d4297 100644 --- a/trunk/drivers/net/bnx2.c +++ b/trunk/drivers/net/bnx2.c @@ -35,8 +35,8 @@ #include #include #include +#ifdef NETIF_F_HW_VLAN_TX #include -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define BCM_VLAN 1 #endif #include @@ -57,8 +57,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.8.0" -#define DRV_MODULE_RELDATE "Aug 14, 2008" +#define DRV_MODULE_VERSION "1.7.9" +#define DRV_MODULE_RELDATE "July 18, 2008" #define RUN_AT(x) (jiffies + (x)) @@ -2876,8 +2876,6 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) struct sw_bd *rx_buf; struct sk_buff *skb; dma_addr_t dma_addr; - u16 vtag = 0; - int hw_vlan __maybe_unused = 0; sw_ring_cons = RX_RING_IDX(sw_cons); sw_ring_prod = RX_RING_IDX(sw_prod); @@ -2921,7 +2919,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) if (len <= bp->rx_copy_thresh) { struct sk_buff *new_skb; - new_skb = netdev_alloc_skb(bp->dev, len + 6); + new_skb = netdev_alloc_skb(bp->dev, len + 2); if (new_skb == NULL) { bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, sw_ring_prod); @@ -2930,9 +2928,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) /* aligned copy */ skb_copy_from_linear_data_offset(skb, - BNX2_RX_OFFSET - 6, - new_skb->data, len + 6); - skb_reserve(new_skb, 6); + BNX2_RX_OFFSET - 2, + new_skb->data, len + 2); + skb_reserve(new_skb, 2); skb_put(new_skb, len); bnx2_reuse_rx_skb(bp, rxr, skb, @@ -2943,25 +2941,6 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_addr, (sw_ring_cons << 16) | sw_ring_prod))) goto next_rx; - if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && - !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) { - vtag = rx_hdr->l2_fhdr_vlan_tag; -#ifdef BCM_VLAN - if (bp->vlgrp) - hw_vlan = 1; - else -#endif - { - struct vlan_ethhdr *ve = (struct vlan_ethhdr *) - __skb_push(skb, 4); - - memmove(ve, skb->data + 4, ETH_ALEN * 2); - ve->h_vlan_proto = htons(ETH_P_8021Q); - ve->h_vlan_TCI = htons(vtag); - len += 4; - } - } - skb->protocol = eth_type_trans(skb, bp->dev); if ((len > (bp->dev->mtu + ETH_HLEN)) && @@ -2983,8 +2962,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) } #ifdef BCM_VLAN - if (hw_vlan) - vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); + if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) { + vlan_hwaccel_receive_skb(skb, bp->vlgrp, + rx_hdr->l2_fhdr_vlan_tag); + } else #endif netif_receive_skb(skb); @@ -3256,10 +3237,10 @@ bnx2_set_rx_mode(struct net_device *dev) BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; #ifdef BCM_VLAN - if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE)) rx_mode 
|= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; #else - if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) + if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; #endif if (dev->flags & IFF_PROMISC) { @@ -5982,12 +5963,10 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; } -#ifdef BCM_VLAN if (bp->vlgrp && vlan_tx_tag_present(skb)) { vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); } -#endif if ((mss = skb_shinfo(skb)->gso_size)) { u32 tcp_opt_len, ip_tcp_len; struct iphdr *iph; diff --git a/trunk/drivers/net/bnx2x.h b/trunk/drivers/net/bnx2x.h index b468f904c7f8..4bf4f7b205f2 100644 --- a/trunk/drivers/net/bnx2x.h +++ b/trunk/drivers/net/bnx2x.h @@ -40,20 +40,20 @@ #define DP(__mask, __fmt, __args...) do { \ if (bp->msglevel & (__mask)) \ printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", ##__args); \ + bp->dev?(bp->dev->name):"?", ##__args); \ } while (0) /* errors debug print */ #define BNX2X_DBG_ERR(__fmt, __args...) do { \ if (bp->msglevel & NETIF_MSG_PROBE) \ printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", ##__args); \ + bp->dev?(bp->dev->name):"?", ##__args); \ } while (0) /* for errors (never masked) */ #define BNX2X_ERR(__fmt, __args...) do { \ printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", ##__args); \ + bp->dev?(bp->dev->name):"?", ##__args); \ } while (0) /* before we have a dev->name use dev_info() */ @@ -120,8 +120,16 @@ #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) -#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) -#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) +#define NIG_WR(reg, val) REG_WR(bp, reg, val) +#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val) +#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val) + + +#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++) + +#define for_each_nondefault_queue(bp, var) \ + for (var = 1; var < bp->num_queues; var++) +#define is_multi(bp) (bp->num_queues > 1) /* fast path */ @@ -155,7 +163,7 @@ struct sw_rx_page { #define NUM_RX_SGE_PAGES 2 #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) -/* RX_SGE_CNT is promised to be a power of 2 */ +/* RX_SGE_CNT is promissed to be a power of 2 */ #define RX_SGE_MASK (RX_SGE_CNT - 1) #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) #define MAX_RX_SGE (NUM_RX_SGE - 1) @@ -250,7 +258,8 @@ struct bnx2x_fastpath { unsigned long tx_pkt, rx_pkt, - rx_calls; + rx_calls, + rx_alloc_failed; /* TPA related */ struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; @@ -266,15 +275,6 @@ struct bnx2x_fastpath { #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) -#define BNX2X_HAS_TX_WORK(fp) \ - ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \ - (fp->tx_pkt_prod != fp->tx_pkt_cons)) - -#define BNX2X_HAS_RX_WORK(fp) \ - (fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb)) - -#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp)) - /* MC hsi */ #define MAX_FETCH_BD 13 /* HW max BDs per packet */ @@ -317,7 +317,7 @@ struct bnx2x_fastpath { #define RCQ_BD(x) ((x) & MAX_RCQ_BD) -/* This is needed for determining of last_max */ +/* This is needed for determening of last_max */ #define SUB_S16(a, b) 
(s16)((s16)(a) - (s16)(b)) #define __SGE_MASK_SET_BIT(el, bit) \ @@ -386,28 +386,20 @@ struct bnx2x_fastpath { #define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ (TPA_TYPE_START | TPA_TYPE_END)) -#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG - -#define BNX2X_IP_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) - -#define BNX2X_L4_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) - -#define BNX2X_RX_CSUM_OK(cqe) \ - (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) +#define BNX2X_RX_SUM_OK(cqe) \ + (!(cqe->fast_path_cqe.status_flags & \ + (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \ + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))) #define BNX2X_RX_SUM_FIX(cqe) \ ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \ (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT)) +#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \ + ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \ + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG) + #define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) #define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) @@ -655,8 +647,6 @@ struct bnx2x_eth_stats { u32 brb_drop_hi; u32 brb_drop_lo; - u32 brb_truncate_hi; - u32 brb_truncate_lo; u32 jabber_packets_received; @@ -673,9 +663,6 @@ struct bnx2x_eth_stats { u32 mac_discard; u32 driver_xoff; - u32 rx_err_discard_pkt; - u32 rx_skb_alloc_failed; - u32 hw_csum_err; }; #define STATS_OFFSET32(stat_name) \ @@ -766,6 +753,7 @@ struct bnx2x { u16 def_att_idx; u32 attn_state; struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + u32 aeu_mask; u32 nig_mask; /* slow path ring */ @@ -784,7 +772,7 @@ struct bnx2x { u8 stats_pending; u8 set_mac_pending; - /* End of fields used in the performance code paths */ + /* End of fileds used in the performance code paths */ int panic; int msglevel; @@ -806,6 +794,9 @@ struct bnx2x { #define BP_FUNC(bp) (bp->func) #define BP_E1HVN(bp) (bp->func >> 1) #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) +/* assorted E1HVN */ +#define IS_E1HMF(bp) (bp->e1hmf != 0) +#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16) int pm_cap; int pcie_cap; @@ -830,7 +821,6 @@ struct bnx2x { u32 mf_config; u16 e1hov; u8 e1hmf; -#define IS_E1HMF(bp) (bp->e1hmf != 0) u8 wol; @@ -846,6 +836,7 @@ struct bnx2x { u16 rx_ticks_int; u16 rx_ticks; + u32 stats_ticks; u32 lin_cnt; int state; @@ -861,7 +852,6 @@ struct bnx2x { #define BNX2X_STATE_ERROR 0xf000 int num_queues; -#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 
4 : 16) u32 rx_mode; #define BNX2X_RX_MODE_NONE 0 @@ -912,17 +902,10 @@ struct bnx2x { }; -#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++) - -#define for_each_nondefault_queue(bp, var) \ - for (var = 1; var < bp->num_queues; var++) -#define is_multi(bp) (bp->num_queues > 1) - - void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, u32 len32); -int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode); static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, int wait) @@ -993,7 +976,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define PCICFG_LINK_SPEED_SHIFT 16 -#define BNX2X_NUM_STATS 42 +#define BNX2X_NUM_STATS 39 #define BNX2X_NUM_TESTS 8 #define BNX2X_MAC_LOOPBACK 0 @@ -1024,10 +1007,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, /* resolution of the rate shaping timer - 100 usec */ #define RS_PERIODIC_TIMEOUT_USEC 100 /* resolution of fairness algorithm in usecs - - coefficient for calculating the actual t fair */ + coefficient for clauclating the actuall t fair */ #define T_FAIR_COEF 10000000 /* number of bytes in single QM arbitration cycle - - coefficient for calculating the fairness timer */ + coeffiecnt for calculating the fairness timer */ #define QM_ARB_BYTES 40000 #define FAIR_MEM 2 diff --git a/trunk/drivers/net/bnx2x_fw_defs.h b/trunk/drivers/net/bnx2x_fw_defs.h index 192fa981b930..e3da7f69d27b 100644 --- a/trunk/drivers/net/bnx2x_fw_defs.h +++ b/trunk/drivers/net/bnx2x_fw_defs.h @@ -9,171 +9,165 @@ #define CSTORM_ASSERT_LIST_INDEX_OFFSET \ - (IS_E1H_OFFSET ? 0x7000 : 0x1000) + (IS_E1H_OFFSET? 0x7000 : 0x1000) #define CSTORM_ASSERT_LIST_OFFSET(idx) \ - (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) + (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) #define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ - (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \ - ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \ - 0x40) + (index * 0x4))) + (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \ + * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \ + * 0x4))) #define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \ - ((function&1) * 0x100)) : (0x1900 + (function * 0x40))) + (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \ + * 0x100)) : (0x1900 + (function * 0x40))) #define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \ - ((function&1) * 0x100)) : (0x1908 + (function * 0x40))) + (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \ + * 0x100)) : (0x1908 + (function * 0x40))) #define CSTORM_FUNCTION_MODE_OFFSET \ - (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff) + (IS_E1H_OFFSET? 0x11e8 : 0xffffffff) #define CSTORM_HC_BTR_OFFSET(port) \ - (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) + (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) #define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ - (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \ + (IS_E1H_OFFSET? 
(0x801a + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4))) #define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ - (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ + (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4))) #define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ - (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ + (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ (0x1400 + (port * 0x280) + (cpu_id * 0x28))) #define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ - (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ + (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ (0x1408 + (port * 0x280) + (cpu_id * 0x28))) #define CSTORM_STATS_FLAGS_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \ + (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \ (function * 0x8))) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff) + (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff) #define TSTORM_ASSERT_LIST_INDEX_OFFSET \ - (IS_E1H_OFFSET ? 0xa000 : 0x1000) + (IS_E1H_OFFSET? 0xa000 : 0x1000) #define TSTORM_ASSERT_LIST_OFFSET(idx) \ - (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) + (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) #define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \ - (IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \ - : (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) + (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \ + (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) #define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ - (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ - 0x28) + (index * 0x4))) + (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ + 0x4))) #define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ - (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) + (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0)) : (0x1400 + (function * 0x28))) #define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ - (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) + (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0)) : (0x1408 + (function * 0x28))) #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \ + (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \ (function * 0x8))) #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \ + (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \ (function * 0x38))) #define TSTORM_FUNCTION_MODE_OFFSET \ - (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff) + (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff) #define TSTORM_HC_BTR_OFFSET(port) \ - (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) + (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) #define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ - (IS_E1H_OFFSET ? 
(0x12c8 + (function * 0x80)) : (0x22c8 + \ + (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \ (function * 0x80))) #define TSTORM_INDIRECTION_TABLE_SIZE 0x80 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \ + (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \ (function * 0x38))) -#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ - (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \ - 0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38))) #define TSTORM_RX_PRODS_OFFSET(port, client_id) \ - (IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \ - : (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) + (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \ + (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) #define TSTORM_STATS_FLAGS_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \ + (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \ (function * 0x8))) -#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20) -#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10) -#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200) +#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20) +#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10) +#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200) #define USTORM_ASSERT_LIST_INDEX_OFFSET \ - (IS_E1H_OFFSET ? 0x8000 : 0x1000) + (IS_E1H_OFFSET? 0x8000 : 0x1000) #define USTORM_ASSERT_LIST_OFFSET(idx) \ - (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) + (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) #define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ - (IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ + (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ (0x5450 + (port * 0x1c8) + (clientId * 0x18))) #define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ - (IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \ - 0x28) + (index * 0x4))) + (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \ + 0x4))) #define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0)) : (0x1900 + (function * 0x28))) + (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0)) : (0x1900 + (function * 0x28))) #define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0)) : (0x1908 + (function * 0x28))) + (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0)) : (0x1908 + (function * 0x28))) #define USTORM_FUNCTION_MODE_OFFSET \ - (IS_E1H_OFFSET ? 0x2448 : 0xffffffff) + (IS_E1H_OFFSET? 0x2448 : 0xffffffff) #define USTORM_HC_BTR_OFFSET(port) \ - (IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) + (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) #define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ - (IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ + (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ (0x5448 + (port * 0x1c8) + (clientId * 0x18))) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \ + (IS_E1H_OFFSET? 
(0x2408 + (function * 0x8)) : (0x5408 + \ (function * 0x8))) #define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ - (IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \ + (IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4))) #define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ - (IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \ + (IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ (index * 0x4))) #define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ - (IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \ + (IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \ (0x1400 + (port * 0x280) + (cpu_id * 0x28))) #define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ - (IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \ + (IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \ (0x1408 + (port * 0x280) + (cpu_id * 0x28))) #define XSTORM_ASSERT_LIST_INDEX_OFFSET \ - (IS_E1H_OFFSET ? 0x9000 : 0x1000) + (IS_E1H_OFFSET? 0x9000 : 0x1000) #define XSTORM_ASSERT_LIST_OFFSET(idx) \ - (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) + (IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ - (IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40))) + (IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40))) #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ - (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ - 0x28) + (index * 0x4))) + (IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ + 0x4))) #define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ - (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) + (IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0)) : (0x1400 + (function * 0x28))) #define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ - (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \ - ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) + (IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \ + * 0xa0)) : (0x1408 + (function * 0x28))) #define XSTORM_E1HOV_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff) + (IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff) #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \ + (IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \ (function * 0x8))) #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \ + (IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \ (function * 0x70))) #define XSTORM_FUNCTION_MODE_OFFSET \ - (IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff) + (IS_E1H_OFFSET? 0x2ac8 : 0xffffffff) #define XSTORM_HC_BTR_OFFSET(port) \ - (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) -#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ - (IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \ - 0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38))) + (IS_E1H_OFFSET? 
(0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \ + (IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \ (function * 0x70))) #define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \ + (IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \ (function * 0x10))) #define XSTORM_SPQ_PROD_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \ + (IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \ (function * 0x10))) #define XSTORM_STATS_FLAGS_OFFSET(function) \ - (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \ + (IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \ (function * 0x8))) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 diff --git a/trunk/drivers/net/bnx2x_hsi.h b/trunk/drivers/net/bnx2x_hsi.h index efd764427fa1..d3e8198d7dba 100644 --- a/trunk/drivers/net/bnx2x_hsi.h +++ b/trunk/drivers/net/bnx2x_hsi.h @@ -1268,7 +1268,7 @@ struct doorbell { /* - * IGU driver acknowledgement register + * IGU driver acknowlegement register */ struct igu_ack_register { #if defined(__BIG_ENDIAN) @@ -1882,7 +1882,7 @@ struct timers_block_context { }; /* - * structure for easy accessibility to assembler + * structure for easy accessability to assembler */ struct eth_tx_bd_flags { u8 as_bitfield; @@ -2044,7 +2044,7 @@ struct eth_context { /* - * Ethernet doorbell + * ethernet doorbell */ struct eth_tx_doorbell { #if defined(__BIG_ENDIAN) @@ -2256,7 +2256,7 @@ struct ramrod_data { }; /* - * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits) + * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits) */ union eth_ramrod_data { struct ramrod_data general; @@ -2330,7 +2330,7 @@ struct spe_hdr { }; /* - * Ethernet slow path element + * ethernet slow path element */ union eth_specific_data { u8 protocol_data[8]; @@ -2343,7 +2343,7 @@ union eth_specific_data { }; /* - * Ethernet slow path element + * ethernet slow path element */ struct eth_spe { struct spe_hdr hdr; @@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers { /* - * common flag to indicate existence of TPA. + * common flag to indicate existance of TPA. 
*/ struct tstorm_eth_tpa_exist { #if defined(__BIG_ENDIAN) @@ -2765,7 +2765,7 @@ struct tstorm_common_stats { }; /* - * Eth statistics query structure for the eth_stats_query ramrod + * Eth statistics query sturcture for the eth_stats_quesry ramrod */ struct eth_stats_query { struct xstorm_common_stats xstorm_common; diff --git a/trunk/drivers/net/bnx2x_init.h b/trunk/drivers/net/bnx2x_init.h index 130927cfc75b..4c7750789b62 100644 --- a/trunk/drivers/net/bnx2x_init.h +++ b/trunk/drivers/net/bnx2x_init.h @@ -72,26 +72,26 @@ struct raw_op { - u32 op:8; - u32 offset:24; + u32 op :8; + u32 offset :24; u32 raw_data; }; struct op_read { - u32 op:8; - u32 offset:24; + u32 op :8; + u32 offset :24; u32 pad; }; struct op_write { - u32 op:8; - u32 offset:24; + u32 op :8; + u32 offset :24; u32 val; }; struct op_string_write { - u32 op:8; - u32 offset:24; + u32 op :8; + u32 offset :24; #ifdef __LITTLE_ENDIAN u16 data_off; u16 data_len; @@ -102,8 +102,8 @@ struct op_string_write { }; struct op_zero { - u32 op:8; - u32 offset:24; + u32 op :8; + u32 offset :24; u32 len; }; @@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, /********************************************************* There are different blobs for each PRAM section. In addition, each blob write operation is divided into a few operations - in order to decrease the amount of phys. contiguous buffer needed. + in order to decrease the amount of phys. contigious buffer needed. Thus, when we select a blob the address may be with some offset from the beginning of PRAM section. The same holds for the INT_TABLE sections. @@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end) len = op->str_wr.data_len; data = data_base + op->str_wr.data_off; - /* careful! it must be in order */ + /* carefull! 
it must be in order */ if (unlikely(op_type > OP_WB)) { /* If E1 only */ @@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc) return crc_res; } -/* registers addresses are not in order +/* regiesers addresses are not in order so these arrays help simplify the code */ static const int cm_start[E1H_FUNC_MAX][9] = { {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START, diff --git a/trunk/drivers/net/bnx2x_init_values.h b/trunk/drivers/net/bnx2x_init_values.h index 9755bf6b08dd..63019055e4bb 100644 --- a/trunk/drivers/net/bnx2x_init_values.h +++ b/trunk/drivers/net/bnx2x_init_values.h @@ -901,28 +901,31 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4}, {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9}, - {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400}, {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293}, - {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42}, - {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2}, + {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400}, - {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2}, + {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a}, {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2}, + {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c}, {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2}, {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298}, {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, - {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c}, + {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e}, {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a}, {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0}, - {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c}, + {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e}, {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa}, {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000}, {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000}, - {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c}, + {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e}, {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba}, {OP_ZP_E1, USEM_REG_PRAM, 0x311c0000}, {OP_ZP_E1H, USEM_REG_PRAM, 0x31070000}, @@ -930,11 +933,11 @@ static const struct raw_op init_ops[] = { {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42}, {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919}, {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906}, - {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e}, + {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0}, {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d}, {OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc}, -#define USEM_COMMON_END 787 -#define USEM_PORT0_START 787 +#define USEM_COMMON_END 790 +#define USEM_PORT0_START 790 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0}, {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa}, @@ -947,27 +950,44 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96}, {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20}, - 
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20}, - {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc}, -#define USEM_PORT0_END 818 -#define USEM_PORT1_START 818 + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc}, +#define USEM_PORT0_END 838 +#define USEM_PORT1_START 838 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0}, {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa}, @@ -980,59 +1000,76 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96}, {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20}, - {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20}, - {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20}, 
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc}, -#define USEM_PORT1_END 849 -#define USEM_FUNC0_START 849 + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52}, + {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc}, +#define USEM_PORT1_END 886 +#define USEM_FUNC0_START 886 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2}, -#define USEM_FUNC0_END 851 -#define USEM_FUNC1_START 851 +#define USEM_FUNC0_END 888 +#define USEM_FUNC1_START 888 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2}, -#define USEM_FUNC1_END 853 -#define USEM_FUNC2_START 853 +#define USEM_FUNC1_END 890 +#define USEM_FUNC2_START 890 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2}, -#define USEM_FUNC2_END 855 -#define USEM_FUNC3_START 855 +#define USEM_FUNC2_END 892 +#define USEM_FUNC3_START 892 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2}, -#define USEM_FUNC3_END 857 -#define USEM_FUNC4_START 857 +#define USEM_FUNC3_END 894 +#define USEM_FUNC4_START 894 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2}, -#define USEM_FUNC4_END 859 -#define USEM_FUNC5_START 859 +#define USEM_FUNC4_END 896 +#define USEM_FUNC5_START 896 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2}, -#define USEM_FUNC5_END 861 -#define USEM_FUNC6_START 861 +#define USEM_FUNC5_END 898 +#define USEM_FUNC6_START 898 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2}, -#define USEM_FUNC6_END 863 -#define USEM_FUNC7_START 863 +#define USEM_FUNC6_END 900 +#define USEM_FUNC7_START 900 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4}, {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2}, -#define USEM_FUNC7_END 865 -#define CSEM_COMMON_START 865 +#define USEM_FUNC7_END 902 +#define CSEM_COMMON_START 902 {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0}, {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0}, {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0}, @@ -1091,29 +1128,29 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0}, {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0}, - {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0}, + {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240}, {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be}, {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff}, - {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8}, + {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa}, {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de}, {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0}, - {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8}, + 
{OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba}, {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee}, {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000}, {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000}, - {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8}, + {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca}, {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe}, {OP_ZP_E1, CSEM_REG_PRAM, 0x32580000}, {OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000}, {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96}, {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f}, - {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca}, + {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc}, {OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300}, -#define CSEM_COMMON_END 944 -#define CSEM_PORT0_START 944 +#define CSEM_COMMON_END 981 +#define CSEM_PORT0_START 981 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0}, {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10}, @@ -1126,8 +1163,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30}, {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6}, {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30}, -#define CSEM_PORT0_END 956 -#define CSEM_PORT1_START 956 +#define CSEM_PORT0_END 993 +#define CSEM_PORT1_START 993 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0}, {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10}, @@ -1140,43 +1177,43 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30}, {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6}, {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30}, -#define CSEM_PORT1_END 968 -#define CSEM_FUNC0_START 968 +#define CSEM_PORT1_END 1005 +#define CSEM_FUNC0_START 1005 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2}, -#define CSEM_FUNC0_END 970 -#define CSEM_FUNC1_START 970 +#define CSEM_FUNC0_END 1007 +#define CSEM_FUNC1_START 1007 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2}, -#define CSEM_FUNC1_END 972 -#define CSEM_FUNC2_START 972 +#define CSEM_FUNC1_END 1009 +#define CSEM_FUNC2_START 1009 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2}, -#define CSEM_FUNC2_END 974 -#define CSEM_FUNC3_START 974 +#define CSEM_FUNC2_END 1011 +#define CSEM_FUNC3_START 1011 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2}, -#define CSEM_FUNC3_END 976 -#define CSEM_FUNC4_START 976 +#define CSEM_FUNC3_END 1013 +#define CSEM_FUNC4_START 1013 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2}, -#define CSEM_FUNC4_END 978 -#define CSEM_FUNC5_START 978 +#define CSEM_FUNC4_END 1015 +#define CSEM_FUNC5_START 1015 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2}, -#define CSEM_FUNC5_END 980 -#define CSEM_FUNC6_START 980 +#define CSEM_FUNC5_END 1017 +#define CSEM_FUNC6_START 1017 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2}, -#define CSEM_FUNC6_END 982 -#define CSEM_FUNC7_START 982 +#define CSEM_FUNC6_END 1019 +#define CSEM_FUNC7_START 1019 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0}, {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2}, -#define CSEM_FUNC7_END 984 -#define XPB_COMMON_START 984 +#define CSEM_FUNC7_END 1021 +#define XPB_COMMON_START 1021 {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20}, -#define 
XPB_COMMON_END 985 -#define DQ_COMMON_START 985 +#define XPB_COMMON_END 1022 +#define DQ_COMMON_START 1022 {OP_WR, DORQ_REG_MODE_ACT, 0x2}, {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3}, {OP_WR, DORQ_REG_OUTST_REQ, 0x4}, @@ -1195,8 +1232,8 @@ static const struct raw_op init_ops[] = { {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c}, {OP_WR, DORQ_REG_REGN, 0x7c1004}, {OP_WR, DORQ_REG_IF_EN, 0xf}, -#define DQ_COMMON_END 1003 -#define TIMERS_COMMON_START 1003 +#define DQ_COMMON_END 1040 +#define TIMERS_COMMON_START 1040 {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2}, {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c}, {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1}, @@ -1219,14 +1256,14 @@ static const struct raw_op init_ops[] = { {OP_WR, TM_REG_EN_CL0_INPUT, 0x1}, {OP_WR, TM_REG_EN_CL1_INPUT, 0x1}, {OP_WR, TM_REG_EN_CL2_INPUT, 0x1}, -#define TIMERS_COMMON_END 1025 -#define TIMERS_PORT0_START 1025 +#define TIMERS_COMMON_END 1062 +#define TIMERS_PORT0_START 1062 {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2}, -#define TIMERS_PORT0_END 1026 -#define TIMERS_PORT1_START 1026 +#define TIMERS_PORT0_END 1063 +#define TIMERS_PORT1_START 1063 {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2}, -#define TIMERS_PORT1_END 1027 -#define XSDM_COMMON_START 1027 +#define TIMERS_PORT1_END 1064 +#define XSDM_COMMON_START 1064 {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614}, {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424}, {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600}, @@ -1274,8 +1311,8 @@ static const struct raw_op init_ops[] = { {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8}, {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1}, {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa}, -#define XSDM_COMMON_END 1074 -#define QM_COMMON_START 1074 +#define XSDM_COMMON_END 1111 +#define QM_COMMON_START 1111 {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6}, {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5}, {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa}, @@ -1576,8 +1613,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5}, {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7}, {OP_WR, QM_REG_CMINTEN, 0xff}, -#define QM_COMMON_END 1374 -#define PBF_COMMON_START 1374 +#define QM_COMMON_END 1411 +#define PBF_COMMON_START 1411 {OP_WR, PBF_REG_INIT, 0x1}, {OP_WR, PBF_REG_INIT_P4, 0x1}, {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1}, @@ -1585,20 +1622,20 @@ static const struct raw_op init_ops[] = { {OP_WR, PBF_REG_INIT_P4, 0x0}, {OP_WR, PBF_REG_INIT, 0x0}, {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0}, -#define PBF_COMMON_END 1381 -#define PBF_PORT0_START 1381 +#define PBF_COMMON_END 1418 +#define PBF_PORT0_START 1418 {OP_WR, PBF_REG_INIT_P0, 0x1}, {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1}, {OP_WR, PBF_REG_INIT_P0, 0x0}, {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0}, -#define PBF_PORT0_END 1385 -#define PBF_PORT1_START 1385 +#define PBF_PORT0_END 1422 +#define PBF_PORT1_START 1422 {OP_WR, PBF_REG_INIT_P1, 0x1}, {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1}, {OP_WR, PBF_REG_INIT_P1, 0x0}, {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0}, -#define PBF_PORT1_END 1389 -#define XCM_COMMON_START 1389 +#define PBF_PORT1_END 1426 +#define XCM_COMMON_START 1426 {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32}, {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020}, {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020}, @@ -1633,7 +1670,7 @@ static const struct raw_op init_ops[] = { {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f}, {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20}, {OP_ZR, XCM_REG_XX_TABLE, 0x12}, - {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc}, + {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce}, {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302}, {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf}, {OP_WR, XCM_REG_N_SM_CTX_LD_1, 
0x7}, @@ -1663,8 +1700,8 @@ static const struct raw_op init_ops[] = { {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1}, {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1}, {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1}, -#define XCM_COMMON_END 1453 -#define XCM_PORT0_START 1453 +#define XCM_COMMON_END 1490 +#define XCM_PORT0_START 1490 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, @@ -1673,8 +1710,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2}, {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, -#define XCM_PORT0_END 1461 -#define XCM_PORT1_START 1461 +#define XCM_PORT0_END 1498 +#define XCM_PORT1_START 1498 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, @@ -1683,8 +1720,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2}, {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, -#define XCM_PORT1_END 1469 -#define XCM_FUNC0_START 1469 +#define XCM_PORT1_END 1506 +#define XCM_FUNC0_START 1506 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, @@ -1694,8 +1731,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, -#define XCM_FUNC0_END 1478 -#define XCM_FUNC1_START 1478 +#define XCM_FUNC0_END 1515 +#define XCM_FUNC1_START 1515 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, @@ -1705,8 +1742,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, -#define XCM_FUNC1_END 1487 -#define XCM_FUNC2_START 1487 +#define XCM_FUNC1_END 1524 +#define XCM_FUNC2_START 1524 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, @@ -1716,8 +1753,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, -#define XCM_FUNC2_END 1496 -#define XCM_FUNC3_START 1496 +#define XCM_FUNC2_END 1533 +#define XCM_FUNC3_START 1533 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, @@ -1727,8 +1764,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, -#define XCM_FUNC3_END 1505 -#define XCM_FUNC4_START 1505 +#define XCM_FUNC3_END 1542 +#define XCM_FUNC4_START 1542 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, @@ -1738,8 +1775,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, -#define XCM_FUNC4_END 1514 -#define XCM_FUNC5_START 1514 +#define XCM_FUNC4_END 1551 
+#define XCM_FUNC5_START 1551 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, @@ -1749,8 +1786,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, -#define XCM_FUNC5_END 1523 -#define XCM_FUNC6_START 1523 +#define XCM_FUNC5_END 1560 +#define XCM_FUNC6_START 1560 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, @@ -1760,8 +1797,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, -#define XCM_FUNC6_END 1532 -#define XCM_FUNC7_START 1532 +#define XCM_FUNC6_END 1569 +#define XCM_FUNC7_START 1569 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, @@ -1771,8 +1808,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, -#define XCM_FUNC7_END 1541 -#define XSEM_COMMON_START 1541 +#define XCM_FUNC7_END 1578 +#define XSEM_COMMON_START 1578 {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0}, {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0}, {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0}, @@ -1839,9 +1876,9 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef}, {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321}, @@ -1849,29 +1886,29 @@ static const struct raw_op init_ops[] = { {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0}, {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3}, {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2}, {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337}, {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339}, {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349}, {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000}, - 
{OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351}, {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000}, {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000}, - {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315}, + {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317}, {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359}, {OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000}, {OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000}, @@ -1881,10 +1918,10 @@ static const struct raw_op init_ops[] = { {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22}, {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2}, {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8}, - {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317}, + {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319}, {OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b}, -#define XSEM_COMMON_END 1651 -#define XSEM_PORT0_START 1651 +#define XSEM_COMMON_END 1688 +#define XSEM_PORT0_START 1688 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c}, @@ -1897,7 +1934,7 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc}, @@ -1913,12 +1950,12 @@ static const struct raw_op init_ops[] = { {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4}, -#define XSEM_PORT0_END 1683 -#define XSEM_PORT1_START 1683 +#define XSEM_PORT0_END 1720 +#define XSEM_PORT1_START 1720 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c}, @@ -1931,7 +1968,7 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc}, @@ -1947,65 +1984,65 @@ static const struct raw_op init_ops[] = { {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f}, {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42}, - {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b}, + {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42}, {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4}, -#define XSEM_PORT1_END 1715 -#define XSEM_FUNC0_START 1715 +#define XSEM_PORT1_END 1752 +#define XSEM_FUNC0_START 1752 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 
0xe}, -#define XSEM_FUNC0_END 1718 -#define XSEM_FUNC1_START 1718 +#define XSEM_FUNC0_END 1755 +#define XSEM_FUNC1_START 1755 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe}, -#define XSEM_FUNC1_END 1721 -#define XSEM_FUNC2_START 1721 +#define XSEM_FUNC1_END 1758 +#define XSEM_FUNC2_START 1758 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe}, -#define XSEM_FUNC2_END 1724 -#define XSEM_FUNC3_START 1724 +#define XSEM_FUNC2_END 1761 +#define XSEM_FUNC3_START 1761 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe}, -#define XSEM_FUNC3_END 1727 -#define XSEM_FUNC4_START 1727 +#define XSEM_FUNC3_END 1764 +#define XSEM_FUNC4_START 1764 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe}, -#define XSEM_FUNC4_END 1730 -#define XSEM_FUNC5_START 1730 +#define XSEM_FUNC4_END 1767 +#define XSEM_FUNC5_START 1767 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe}, -#define XSEM_FUNC5_END 1733 -#define XSEM_FUNC6_START 1733 +#define XSEM_FUNC5_END 1770 +#define XSEM_FUNC6_START 1770 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe}, -#define XSEM_FUNC6_END 1736 -#define XSEM_FUNC7_START 1736 +#define XSEM_FUNC6_END 1773 +#define XSEM_FUNC7_START 1773 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0}, {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1}, {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe}, -#define XSEM_FUNC7_END 1739 -#define CDU_COMMON_START 1739 +#define XSEM_FUNC7_END 1776 +#define CDU_COMMON_START 1776 {OP_WR, CDU_REG_CDU_CONTROL0, 0x1}, {OP_WR_E1H, CDU_REG_MF_MODE, 0x1}, {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000}, {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d}, - {OP_WB_E1, CDU_REG_L1TT, 0x200033d}, + {OP_WB_E1, CDU_REG_L1TT, 0x200033f}, {OP_WB_E1H, CDU_REG_L1TT, 0x20003e1}, - {OP_WB_E1, CDU_REG_MATT, 0x20053d}, + {OP_WB_E1, CDU_REG_MATT, 0x20053f}, {OP_WB_E1H, CDU_REG_MATT, 0x2805e1}, {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2}, - {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d}, + {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f}, {OP_ZR, CDU_REG_MATT + 0xa0, 0x18}, -#define CDU_COMMON_END 1750 -#define DMAE_COMMON_START 1750 +#define CDU_COMMON_END 1787 +#define DMAE_COMMON_START 1787 {OP_ZR, DMAE_REG_CMD_MEM, 0xe0}, {OP_WR, DMAE_REG_CRC16C_INIT, 0x0}, {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1}, @@ -2013,24 +2050,24 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2}, {OP_WR, DMAE_REG_PCI_IFEN, 0x1}, {OP_WR, DMAE_REG_GRC_IFEN, 0x1}, -#define DMAE_COMMON_END 1757 -#define PXP_COMMON_START 1757 - {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563}, +#define DMAE_COMMON_END 1794 +#define PXP_COMMON_START 1794 + {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565}, {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609}, - {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568}, + {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a}, {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e}, - {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d}, -#define PXP_COMMON_END 1762 -#define CFC_COMMON_START 1762 + {OP_WB_E1, 
PXP_REG_HST_INBOUND_INT, 0x5056f}, +#define PXP_COMMON_END 1799 +#define CFC_COMMON_START 1799 {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100}, {OP_WR, CFC_REG_CONTROL0, 0x10}, {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff}, {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a}, -#define CFC_COMMON_END 1766 -#define HC_COMMON_START 1766 +#define CFC_COMMON_END 1803 +#define HC_COMMON_START 1803 {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4}, -#define HC_COMMON_END 1767 -#define HC_PORT0_START 1767 +#define HC_COMMON_END 1804 +#define HC_PORT0_START 1804 {OP_WR_E1, HC_REG_CONFIG_0, 0x1080}, {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2}, {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10}, @@ -2049,8 +2086,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, -#define HC_PORT0_END 1785 -#define HC_PORT1_START 1785 +#define HC_PORT0_END 1822 +#define HC_PORT1_START 1822 {OP_WR_E1, HC_REG_CONFIG_1, 0x1080}, {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2}, {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10}, @@ -2069,8 +2106,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, -#define HC_PORT1_END 1803 -#define HC_FUNC0_START 1803 +#define HC_PORT1_END 1840 +#define HC_FUNC0_START 1840 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0}, {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, @@ -2086,8 +2123,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, -#define HC_FUNC0_END 1818 -#define HC_FUNC1_START 1818 +#define HC_FUNC0_END 1855 +#define HC_FUNC1_START 1855 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1}, {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, @@ -2103,8 +2140,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, -#define HC_FUNC1_END 1833 -#define HC_FUNC2_START 1833 +#define HC_FUNC1_END 1870 +#define HC_FUNC2_START 1870 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2}, {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, @@ -2120,8 +2157,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, -#define HC_FUNC2_END 1848 -#define HC_FUNC3_START 1848 +#define HC_FUNC2_END 1885 +#define HC_FUNC3_START 1885 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3}, {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, @@ -2137,8 +2174,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, -#define HC_FUNC3_END 1863 -#define HC_FUNC4_START 1863 +#define HC_FUNC3_END 1900 +#define HC_FUNC4_START 1900 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4}, {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, @@ -2154,8 +2191,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, {OP_ZR_E1H, 
HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, -#define HC_FUNC4_END 1878 -#define HC_FUNC5_START 1878 +#define HC_FUNC4_END 1915 +#define HC_FUNC5_START 1915 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5}, {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, @@ -2171,8 +2208,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, -#define HC_FUNC5_END 1893 -#define HC_FUNC6_START 1893 +#define HC_FUNC5_END 1930 +#define HC_FUNC6_START 1930 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6}, {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, @@ -2188,8 +2225,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, -#define HC_FUNC6_END 1908 -#define HC_FUNC7_START 1908 +#define HC_FUNC6_END 1945 +#define HC_FUNC7_START 1945 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7}, {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, @@ -2205,8 +2242,8 @@ static const struct raw_op init_ops[] = { {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, -#define HC_FUNC7_END 1923 -#define PXP2_COMMON_START 1923 +#define HC_FUNC7_END 1960 +#define PXP2_COMMON_START 1960 {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340}, {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1}, {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10}, @@ -2324,8 +2361,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1}, {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1}, {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340}, -#define PXP2_COMMON_END 2040 -#define MISC_AEU_COMMON_START 2040 +#define PXP2_COMMON_END 2077 +#define MISC_AEU_COMMON_START 2077 {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16}, {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000}, {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555}, @@ -2345,8 +2382,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0}, {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00}, {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3}, -#define MISC_AEU_COMMON_END 2059 -#define MISC_AEU_PORT0_START 2059 +#define MISC_AEU_COMMON_END 2096 +#define MISC_AEU_PORT0_START 2096 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000}, {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000}, {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef}, @@ -2379,8 +2416,8 @@ static const struct raw_op init_ops[] = { {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0}, {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3}, {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7}, -#define MISC_AEU_PORT0_END 2091 -#define MISC_AEU_PORT1_START 2091 +#define MISC_AEU_PORT0_END 2128 +#define MISC_AEU_PORT1_START 2128 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000}, {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000}, {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef}, @@ -2413,7 +2450,7 @@ static const struct raw_op init_ops[] = { {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0}, {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3}, {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7}, -#define MISC_AEU_PORT1_END 2123 +#define MISC_AEU_PORT1_END 2160 }; @@ -2523,92 +2560,103 @@ static const u32 init_data_e1[] = { 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 
0x0006aa00, 0x00072d80, 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280, 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780, - 0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff, + 0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000, + 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, + 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, + 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, - 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500, - 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, - 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, - 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, - 0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180, - 0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480, - 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780, - 0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80, - 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80, - 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604, - 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000, - 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, + 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080, + 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380, + 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, + 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980, + 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, + 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, + 0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, + 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, - 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, - 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, + 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, + 0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, - 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, + 
0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, - 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000, - 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, - 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c, + 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, + 0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, + 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, + 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, + 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, + 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, + 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, + 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, + 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, + 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, + 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, + 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, + 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, + 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, - 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, - 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c, + 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, + 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, - 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, + 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, - 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, - 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, - 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c, + 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, + 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, + 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, + 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 
0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, - 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, - 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c, + 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, + 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, - 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, - 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, + 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, + 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a, - 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c, - 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, - 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, - 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, - 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, - 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, - 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, - 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, - 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, - 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, - 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, + 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, + 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, + 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, + 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, + 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, + 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, + 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, + 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, + 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, + 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, + 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, + 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, + 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, + 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, + 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 
0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, @@ -2630,27 +2678,16 @@ static const u32 init_data_e1[] = { 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, - 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, - 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, - 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, - 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, - 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, - 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, - 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, - 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, - 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, - 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170, - 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000, - 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210, - 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250, - 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180, - 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000, - 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc, - 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, - 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc, - 0xcccccccc, 0x00002000 + 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, + 0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, + 0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, + 0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, + 0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000, + 0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, + 0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, + 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, + 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, + 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000 }; static const u32 init_data_e1h[] = { diff --git a/trunk/drivers/net/bnx2x_link.c b/trunk/drivers/net/bnx2x_link.c index 4ce7fe9c5251..ff2743db10d9 100644 --- a/trunk/drivers/net/bnx2x_link.c +++ b/trunk/drivers/net/bnx2x_link.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "bnx2x_reg.h" #include "bnx2x_fw_defs.h" @@ -30,16 +31,17 @@ /********************************************************/ #define SUPPORT_CL73 0 /* Currently no */ -#define ETH_HLEN 14 +#define ETH_HLEN 14 #define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ #define ETH_MIN_PACKET_SIZE 60 #define ETH_MAX_PACKET_SIZE 1500 #define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define MDIO_ACCESS_TIMEOUT 1000 #define BMAC_CONTROL_RX_ENABLE 2 +#define MAX_MTU_SIZE 5000 /***********************************************************/ -/* Shortcut definitions */ +/* Shortcut definitions */ 
/***********************************************************/ #define NIG_STATUS_XGXS0_LINK10G \ @@ -78,12 +80,12 @@ #define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 #define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 -#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM -#define AUTONEG_PARALLEL \ +#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM +#define AUTONEG_PARALLEL \ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION -#define AUTONEG_SGMII_FIBER_AUTODET \ +#define AUTONEG_SGMII_FIBER_AUTODET \ SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT -#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY +#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY #define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE @@ -200,10 +202,11 @@ static void bnx2x_emac_init(struct link_params *params, /* init emac - use read-modify-write */ /* self clear reset */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); + EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); timeout = 200; - do { + do + { val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); if (!timeout) { @@ -211,18 +214,18 @@ static void bnx2x_emac_init(struct link_params *params, return; } timeout--; - } while (val & EMAC_MODE_RESET); + }while (val & EMAC_MODE_RESET); /* Set mac address */ val = ((params->mac_addr[0] << 8) | params->mac_addr[1]); - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val); + EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val); val = ((params->mac_addr[2] << 24) | (params->mac_addr[3] << 16) | (params->mac_addr[4] << 8) | params->mac_addr[5]); - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); + EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); } static u8 bnx2x_emac_enable(struct link_params *params, @@ -283,7 +286,7 @@ static u8 bnx2x_emac_enable(struct link_params *params, if (CHIP_REV_IS_SLOW(bp)) { /* config GMII mode */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - EMAC_WR(bp, EMAC_REG_EMAC_MODE, + EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII)); } else { /* ASIC */ /* pause enable/disable */ @@ -295,19 +298,17 @@ static u8 bnx2x_emac_enable(struct link_params *params, EMAC_RX_MODE_FLOW_EN); bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, - (EMAC_TX_MODE_EXT_PAUSE_EN | - EMAC_TX_MODE_FLOW_EN)); + EMAC_TX_MODE_EXT_PAUSE_EN); if (vars->flow_ctrl & FLOW_CTRL_TX) bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, - (EMAC_TX_MODE_EXT_PAUSE_EN | - EMAC_TX_MODE_FLOW_EN)); + EMAC_TX_MODE_EXT_PAUSE_EN); } /* KEEP_VLAN_TAG, promiscuous */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; - EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); + EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); /* Set Loopback */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); @@ -315,10 +316,10 @@ static u8 bnx2x_emac_enable(struct link_params *params, val |= 0x810; else val &= ~0x810; - EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); + EMAC_WR(EMAC_REG_EMAC_MODE, val); /* enable emac for jumbo packets */ - EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, + EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE, (EMAC_RX_MTU_SIZE_JUMBO_ENA | (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); @@ -590,9 +591,9 @@ void bnx2x_link_status_update(struct link_params *params, vars->flow_ctrl &= ~FLOW_CTRL_RX; if (vars->phy_flags & PHY_XGXS_FLAG) { - if (vars->line_speed && - ((vars->line_speed == SPEED_10) || - (vars->line_speed == SPEED_100))) { + if (params->req_line_speed && + ((params->req_line_speed == 
SPEED_10) || + (params->req_line_speed == SPEED_100))) { vars->phy_flags |= PHY_SGMII_FLAG; } else { vars->phy_flags &= ~PHY_SGMII_FLAG; @@ -644,7 +645,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; u32 wb_data[2]; - u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); + u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); /* Only if the bmac is out of reset */ if (REG_RD(bp, MISC_REG_RESET_REG_2) & @@ -669,6 +670,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, u8 port = params->port; u32 init_crd, crd; u32 count = 1000; + u32 pause = 0; /* disable port */ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); @@ -691,25 +693,33 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, return -EINVAL; } - if (flow_ctrl & FLOW_CTRL_RX || - line_speed == SPEED_10 || - line_speed == SPEED_100 || - line_speed == SPEED_1000 || - line_speed == SPEED_2500) { - REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); + if (flow_ctrl & FLOW_CTRL_RX) + pause = 1; + REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause); + if (pause) { /* update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); /* update init credit */ - init_crd = 778; /* (800-18-4) */ + init_crd = 778; /* (800-18-4) */ } else { u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16; - REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); + /* update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); /* update init credit */ switch (line_speed) { + case SPEED_10: + case SPEED_100: + case SPEED_1000: + init_crd = thresh + 55 - 22; + break; + + case SPEED_2500: + init_crd = thresh + 138 - 22; + break; + case SPEED_10000: init_crd = thresh + 553 - 22; break; @@ -754,10 +764,10 @@ static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port) emac_base = GRCBASE_EMAC0; break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: - emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; + emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1; break; default: - emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + emac_base = (port) ? 
GRCBASE_EMAC1: GRCBASE_EMAC0; break; } return emac_base; @@ -1034,7 +1044,7 @@ static void bnx2x_set_swap_lanes(struct link_params *params) } static void bnx2x_set_parallel_detection(struct link_params *params, - u8 phy_flags) + u8 phy_flags) { struct bnx2x *bp = params->bp; u16 control2; @@ -1104,7 +1114,7 @@ static void bnx2x_set_autoneg(struct link_params *params, MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); /* CL37 Autoneg Enabled */ - if (vars->line_speed == SPEED_AUTO_NEG) + if (params->req_line_speed == SPEED_AUTO_NEG) reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; else /* CL37 Autoneg Disabled */ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | @@ -1122,7 +1132,7 @@ static void bnx2x_set_autoneg(struct link_params *params, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN; - if (vars->line_speed == SPEED_AUTO_NEG) + if (params->req_line_speed == SPEED_AUTO_NEG) reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; else reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; @@ -1138,7 +1148,7 @@ static void bnx2x_set_autoneg(struct link_params *params, MDIO_REG_BANK_BAM_NEXT_PAGE, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, ®_val); - if (vars->line_speed == SPEED_AUTO_NEG) { + if (params->req_line_speed == SPEED_AUTO_NEG) { /* Enable BAM aneg Mode and TetonII aneg Mode */ reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); @@ -1154,7 +1164,7 @@ static void bnx2x_set_autoneg(struct link_params *params, reg_val); /* Enable Clause 73 Aneg */ - if ((vars->line_speed == SPEED_AUTO_NEG) && + if ((params->req_line_speed == SPEED_AUTO_NEG) && (SUPPORT_CL73)) { /* Enable BAM Station Manager */ @@ -1216,8 +1226,7 @@ static void bnx2x_set_autoneg(struct link_params *params, } /* program SerDes, forced speed */ -static void bnx2x_program_serdes(struct link_params *params, - struct link_vars *vars) +static void bnx2x_program_serdes(struct link_params *params) { struct bnx2x *bp = params->bp; u16 reg_val; @@ -1239,35 +1248,28 @@ static void bnx2x_program_serdes(struct link_params *params, /* program speed - needed only if the speed is greater than 1G (2.5G or 10G) */ - CL45_RD_OVER_CL22(bp, params->port, + if (!((params->req_line_speed == SPEED_1000) || + (params->req_line_speed == SPEED_100) || + (params->req_line_speed == SPEED_10))) { + CL45_RD_OVER_CL22(bp, params->port, params->phy_addr, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_MISC1, ®_val); - /* clearing the speed value before setting the right speed */ - DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); - - reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | - MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); - - if (!((vars->line_speed == SPEED_1000) || - (vars->line_speed == SPEED_100) || - (vars->line_speed == SPEED_10))) { - + /* clearing the speed value before setting the right speed */ + reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK; reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); - if (vars->line_speed == SPEED_10000) + if (params->req_line_speed == SPEED_10000) reg_val |= MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; - if (vars->line_speed == SPEED_13000) + if (params->req_line_speed == SPEED_13000) reg_val |= MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; - } - - CL45_WR_OVER_CL22(bp, params->port, + CL45_WR_OVER_CL22(bp, params->port, params->phy_addr, MDIO_REG_BANK_SERDES_DIGITAL, 
MDIO_SERDES_DIGITAL_MISC1, reg_val); - + } } static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) @@ -1293,49 +1295,48 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) MDIO_OVER_1G_UP3, 0); } -static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc) +static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, + u32 *ieee_fc) { - *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; + struct bnx2x *bp = params->bp; + /* for AN, we are always publishing full duplex */ + u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; + /* resolve pause mode and advertisement * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ switch (params->req_flow_ctrl) { case FLOW_CTRL_AUTO: - if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) { - *ieee_fc |= + if (params->mtu <= MAX_MTU_SIZE) { + an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; } else { - *ieee_fc |= + an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; } break; case FLOW_CTRL_TX: - *ieee_fc |= + an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; break; case FLOW_CTRL_RX: case FLOW_CTRL_BOTH: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; break; case FLOW_CTRL_NONE: default: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; + an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; break; } -} -static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, - u32 ieee_fc) -{ - struct bnx2x *bp = params->bp; - /* for AN, we are always publishing full duplex */ + *ieee_fc = an_adv; CL45_WR_OVER_CL22(bp, params->port, params->phy_addr, MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc); + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv); } static void bnx2x_restart_autoneg(struct link_params *params) @@ -1381,8 +1382,7 @@ static void bnx2x_restart_autoneg(struct link_params *params) } } -static void bnx2x_initialize_sgmii_process(struct link_params *params, - struct link_vars *vars) +static void bnx2x_initialize_sgmii_process(struct link_params *params) { struct bnx2x *bp = params->bp; u16 control1; @@ -1406,7 +1406,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params, control1); /* if forced speed */ - if (!(vars->line_speed == SPEED_AUTO_NEG)) { + if (!(params->req_line_speed == SPEED_AUTO_NEG)) { /* set speed, disable autoneg */ u16 mii_control; @@ -1419,7 +1419,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params, MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); - switch (vars->line_speed) { + switch (params->req_line_speed) { case SPEED_100: mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; @@ -1433,8 +1433,8 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params, break; default: /* invalid speed for SGMII */ - DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", - vars->line_speed); + DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n", + params->req_line_speed); break; } @@ -1460,20 +1460,20 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params, */ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) -{ /* LD LP */ - switch (pause_result) { /* ASYM P ASYM P */ - case 0xb: /* 1 0 1 1 */ +{ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ vars->flow_ctrl = FLOW_CTRL_TX; break; - case 0xe: /* 1 1 1 0 */ + case 0xe: /* 1 1 1 0 */ vars->flow_ctrl = FLOW_CTRL_RX; break; - case 0x5: /* 0 1 0 1 */ - 
case 0x7: /* 0 1 1 1 */ - case 0xd: /* 1 1 0 1 */ - case 0xf: /* 1 1 1 1 */ + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ vars->flow_ctrl = FLOW_CTRL_BOTH; break; @@ -1531,28 +1531,6 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params, DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", pause_result); bnx2x_pause_resolve(vars, pause_result); - if (vars->flow_ctrl == FLOW_CTRL_NONE && - ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { - bnx2x_cl45_read(bp, port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LD, &ld_pause); - - bnx2x_cl45_read(bp, port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LP, &lp_pause); - pause_result = (ld_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; - pause_result |= (lp_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; - - bnx2x_pause_resolve(vars, pause_result); - DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n", - pause_result); - } } return ret; } @@ -1563,8 +1541,8 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params, u32 gp_status) { struct bnx2x *bp = params->bp; - u16 ld_pause; /* local driver */ - u16 lp_pause; /* link partner */ + u16 ld_pause; /* local driver */ + u16 lp_pause; /* link partner */ u16 pause_result; vars->flow_ctrl = FLOW_CTRL_NONE; @@ -1595,10 +1573,13 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params, (bnx2x_ext_phy_resove_fc(params, vars))) { return; } else { - if (params->req_flow_ctrl == FLOW_CTRL_AUTO) - vars->flow_ctrl = params->req_fc_auto_adv; - else - vars->flow_ctrl = params->req_flow_ctrl; + vars->flow_ctrl = params->req_flow_ctrl; + if (vars->flow_ctrl == FLOW_CTRL_AUTO) { + if (params->mtu <= MAX_MTU_SIZE) + vars->flow_ctrl = FLOW_CTRL_BOTH; + else + vars->flow_ctrl = FLOW_CTRL_TX; + } } DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); } @@ -1609,7 +1590,6 @@ static u8 bnx2x_link_settings_status(struct link_params *params, u32 gp_status) { struct bnx2x *bp = params->bp; - u8 rc = 0; vars->link_status = 0; @@ -1710,11 +1690,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params, vars->link_status |= LINK_STATUS_SERDES_LINK; - if ((params->req_line_speed == SPEED_AUTO_NEG) && - ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) || - (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) { + if (params->req_line_speed == SPEED_AUTO_NEG) { vars->autoneg = AUTO_NEG_ENABLED; if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { @@ -1729,18 +1705,18 @@ static u8 bnx2x_link_settings_status(struct link_params *params, } if (vars->flow_ctrl & FLOW_CTRL_TX) - vars->link_status |= - LINK_STATUS_TX_FLOW_CONTROL_ENABLED; + vars->link_status |= + LINK_STATUS_TX_FLOW_CONTROL_ENABLED; if (vars->flow_ctrl & FLOW_CTRL_RX) - vars->link_status |= - LINK_STATUS_RX_FLOW_CONTROL_ENABLED; + vars->link_status |= + LINK_STATUS_RX_FLOW_CONTROL_ENABLED; } else { /* link_down */ DP(NETIF_MSG_LINK, "phy link down\n"); vars->phy_link_up = 0; - + vars->line_speed = 0; vars->duplex = DUPLEX_FULL; vars->flow_ctrl = FLOW_CTRL_NONE; vars->autoneg = AUTO_NEG_DISABLED; @@ -1841,15 +1817,15 @@ static u8 bnx2x_emac_program(struct link_params *params, } /*****************************************************************************/ -/* External Phy section */ +/* External Phy section */ /*****************************************************************************/ -static void bnx2x_hw_reset(struct 
bnx2x *bp, u8 port) +static void bnx2x_hw_reset(struct bnx2x *bp) { bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW); msleep(1); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH); } static void bnx2x_ext_phy_reset(struct link_params *params, @@ -1878,11 +1854,10 @@ static void bnx2x_ext_phy_reset(struct link_params *params, /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH); /* HW reset */ - bnx2x_hw_reset(bp, params->port); + bnx2x_hw_reset(bp); bnx2x_cl45_write(bp, params->port, ext_phy_type, @@ -1894,8 +1869,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params, /* Unset Low Power Mode and SW reset */ /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH); DP(NETIF_MSG_LINK, "XGXS 8072\n"); bnx2x_cl45_write(bp, params->port, @@ -1913,14 +1887,19 @@ static void bnx2x_ext_phy_reset(struct link_params *params, /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH); DP(NETIF_MSG_LINK, "XGXS 8073\n"); + bnx2x_cl45_write(bp, + params->port, + ext_phy_type, + ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1<<15); } break; @@ -1929,11 +1908,10 @@ static void bnx2x_ext_phy_reset(struct link_params *params, /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH); /* HW reset */ - bnx2x_hw_reset(bp, params->port); + bnx2x_hw_reset(bp); break; @@ -1956,7 +1934,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params, case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: DP(NETIF_MSG_LINK, "SerDes 5482\n"); - bnx2x_hw_reset(bp, params->port); + bnx2x_hw_reset(bp); break; default: @@ -2120,45 +2098,42 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params) } -static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port, - u8 ext_phy_addr) +static void bnx2x_bcm8073_external_rom_boot(struct link_params *params) { - u16 fw_ver1, fw_ver2; - /* Boot port from external ROM */ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u8 ext_phy_addr = ((params->ext_phy_config & + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); + u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); + u16 fw_ver1, fw_ver2, val; + /* Need to wait 100ms after reset */ + msleep(100); + /* Boot port from external ROM */ /* EDC grst */ - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001); /* ucode reboot and rst */ - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x008c); - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0001); /* Reset internal microprocessor */ - 
bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); /* Release srst bit */ - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); @@ -2167,52 +2142,35 @@ static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port, msleep(100); /* Clear ser_boot_ctl bit */ - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000); - bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER1, &fw_ver1); - bnx2x_cl45_read(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, &fw_ver2); + bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &fw_ver2); DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2); -} - -static void bnx2x_bcm807x_force_10G(struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u8 ext_phy_addr = ((params->ext_phy_config & - PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> - PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); - u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); - - /* Force KR or KX */ - bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, + /* Only set bit 10 = 1 (Tx power down) */ + bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, - 0x2040); + MDIO_PMA_REG_TX_POWER_DOWN, &val); + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, - MDIO_PMA_REG_10G_CTRL2, - 0x000b); + MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10)); + + msleep(600); + /* Release bit 10 (Release Tx power down) */ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, - MDIO_PMA_REG_BCM_CTRL, - 0x0000); - bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, - MDIO_AN_DEVAD, - MDIO_AN_REG_CTRL, - 0x0000); + MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); + } + static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) { struct bnx2x *bp = params->bp; @@ -2278,51 +2236,32 @@ static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); } - -static void bnx2x_8073_set_pause_cl37(struct link_params *params, - struct link_vars *vars) +static void bnx2x_bcm807x_force_10G(struct link_params *params) { - struct bnx2x *bp = params->bp; - u16 cl37_val; + u8 port = params->port; u8 ext_phy_addr = ((params->ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); - bnx2x_cl45_read(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LD, &cl37_val); - - cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - /* Please refer to Table 28B-3 of 802.3ab-1999 spec. 
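The 8073 ROM-boot hunk above ends with a read-modify-write toggle of the transmitter power-down bit: set bit 10, wait, then clear it. Below is a minimal, self-contained sketch of that pattern; cl45_read/cl45_write here are fake array-backed stand-ins rather than the driver's MDIO accessors, and the register number used in main() is hypothetical.

	#include <stdint.h>
	#include <stdio.h>

	/* Fake, array-backed stand-ins for the Clause 45 MDIO accessors. */
	static uint16_t fake_regs[0x100];

	static void cl45_read(uint16_t reg, uint16_t *val)	{ *val = fake_regs[reg]; }
	static void cl45_write(uint16_t reg, uint16_t val)	{ fake_regs[reg] = val; }

	/* Read-modify-write toggle: assert a bit, wait, then release it. */
	static void toggle_bit(uint16_t reg, uint16_t bit)
	{
		uint16_t val;

		cl45_read(reg, &val);
		cl45_write(reg, (uint16_t)(val | bit));		/* e.g. Tx power down */
		/* a delay such as msleep(600) would sit here in kernel context */
		cl45_write(reg, (uint16_t)(val & ~bit));	/* release the bit */
	}

	int main(void)
	{
		toggle_bit(0x09 /* hypothetical register */, 1u << 10);
		printf("reg 0x09 = 0x%04x\n", fake_regs[0x09]);
		return 0;
	}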
*/ - - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { - cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; - } - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { - cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; - } - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { - cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - } - DP(NETIF_MSG_LINK, - "Ext phy AN advertize cl37 0x%x\n", cl37_val); - - bnx2x_cl45_write(bp, params->port, - ext_phy_type, - ext_phy_addr, + /* Force KR or KX */ + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 0x2040); + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_10G_CTRL2, + 0x000b); + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_BCM_CTRL, + 0x0000); + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LD, cl37_val); - msleep(500); + MDIO_AN_REG_CTRL, + 0x0000); } static void bnx2x_ext_phy_set_pause(struct link_params *params, @@ -2343,16 +2282,13 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, MDIO_AN_REG_ADV_PAUSE, &val); val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; - /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + if (vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; } - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + if (vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; @@ -2366,65 +2302,6 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, MDIO_AN_REG_ADV_PAUSE, val); } - -static void bnx2x_init_internal_phy(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - if (!(vars->phy_flags & PHY_SGMII_FLAG)) { - u16 bank, rx_eq; - - rx_eq = ((params->serdes_config & - PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >> - PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT); - - DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq); - for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL; - bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) { - CL45_WR_OVER_CL22(bp, port, - params->phy_addr, - bank , - MDIO_RX0_RX_EQ_BOOST, - ((rx_eq & - MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) | - MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL)); - } - - /* forced speed requested? 
*/ - if (vars->line_speed != SPEED_AUTO_NEG) { - DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); - - /* disable autoneg */ - bnx2x_set_autoneg(params, vars); - - /* program speed and duplex */ - bnx2x_program_serdes(params, vars); - - } else { /* AN_mode */ - DP(NETIF_MSG_LINK, "not SGMII, AN\n"); - - /* AN enabled */ - bnx2x_set_brcm_cl37_advertisment(params); - - /* program duplex & pause advertisement (for aneg) */ - bnx2x_set_ieee_aneg_advertisment(params, - vars->ieee_fc); - - /* enable autoneg */ - bnx2x_set_autoneg(params, vars); - - /* enable and restart AN */ - bnx2x_restart_autoneg(params); - } - - } else { /* SGMII mode */ - DP(NETIF_MSG_LINK, "SGMII\n"); - - bnx2x_initialize_sgmii_process(params, vars); - } -} - static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) { struct bnx2x *bp = params->bp; @@ -2466,6 +2343,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) switch (ext_phy_type) { case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + DP(NETIF_MSG_LINK, "XGXS Direct\n"); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: @@ -2541,7 +2419,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LP, + MDIO_AN_REG_CL37_FD, 0x0020); /* Enable CL37 AN */ bnx2x_cl45_write(bp, params->port, @@ -2580,43 +2458,54 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) rx_alarm_ctrl_val = 0x400; lasi_ctrl_val = 0x0004; } else { + /* In 8073, port1 is directed through emac0 and + * port0 is directed through emac1 + */ rx_alarm_ctrl_val = (1<<2); + /*lasi_ctrl_val = 0x0005;*/ lasi_ctrl_val = 0x0004; } - /* enable LASI */ - bnx2x_cl45_write(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM_CTRL, - rx_alarm_ctrl_val); - - bnx2x_cl45_write(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_LASI_CTRL, - lasi_ctrl_val); - - bnx2x_8073_set_pause_cl37(params, vars); + /* Wait for soft reset to get cleared upto 1 sec */ + for (cnt = 0; cnt < 1000; cnt++) { + bnx2x_cl45_read(bp, params->port, + ext_phy_type, + ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + &ctrl); + if (!(ctrl & (1<<15))) + break; + msleep(1); + } + DP(NETIF_MSG_LINK, + "807x control reg 0x%x (after %d ms)\n", + ctrl, cnt); if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){ bnx2x_bcm8072_external_rom_boot(params); } else { - + bnx2x_bcm8073_external_rom_boot(params); /* In case of 8073 with long xaui lines, don't set the 8073 xaui low power*/ bnx2x_bcm8073_set_xaui_low_power_mode(params); } - bnx2x_cl45_read(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_PMA_DEVAD, - 0xca13, - &tmp1); + /* enable LASI */ + bnx2x_cl45_write(bp, params->port, + ext_phy_type, + ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_RX_ALARM_CTRL, + rx_alarm_ctrl_val); + + bnx2x_cl45_write(bp, params->port, + ext_phy_type, + ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LASI_CTRL, + lasi_ctrl_val); bnx2x_cl45_read(bp, params->port, ext_phy_type, @@ -2630,21 +2519,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) /* If this is forced speed, set to KR or KX * (all other are not supported) */ - if (params->loopback_mode == LOOPBACK_EXT) { - bnx2x_bcm807x_force_10G(params); - DP(NETIF_MSG_LINK, - "Forced speed 10G on 807X\n"); - break; - } else { - bnx2x_cl45_write(bp, params->port, - ext_phy_type, ext_phy_addr, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_BCM_CTRL, - 
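The hunk above adds a bounded wait for the PHY's self-clearing soft-reset bit: poll bit 15 of the control register for up to one second, sleeping 1 ms per try. A hedged, standalone sketch of that poll-with-timeout shape, with the register read and the sleep stubbed out so it runs on its own:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Fake control-register read: reports reset-in-progress for a few polls. */
	static uint16_t read_ctrl(void)
	{
		static int polls;
		return (polls++ < 3) ? (uint16_t)(1u << 15) : 0;
	}

	static void sleep_ms(unsigned int ms) { (void)ms; /* msleep() stand-in */ }

	/* Poll the self-clearing soft-reset bit (bit 15) for at most max_ms ms. */
	static bool wait_soft_reset_clear(unsigned int max_ms)
	{
		unsigned int cnt;

		for (cnt = 0; cnt < max_ms; cnt++) {
			if (!(read_ctrl() & (1u << 15)))
				return true;	/* reset finished */
			sleep_ms(1);
		}
		return false;			/* still set: report a timeout */
	}

	int main(void)
	{
		printf("reset cleared: %d\n", wait_soft_reset_clear(1000));
		return 0;
	}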
0x0002); - } - if (params->req_line_speed != SPEED_AUTO_NEG) { - if (params->req_line_speed == SPEED_10000) { - val = (1<<7); + if (!(params->req_line_speed == SPEED_AUTO_NEG)) { + if (params->req_line_speed == SPEED_10000) { + bnx2x_bcm807x_force_10G(params); + DP(NETIF_MSG_LINK, + "Forced speed 10G on 807X\n"); + break; } else if (params->req_line_speed == SPEED_2500) { val = (1<<5); @@ -2659,14 +2539,11 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) val |= (1<<7); - /* Note that 2.5G works only when - used with 1G advertisment */ if (params->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | - PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) val |= (1<<5); - DP(NETIF_MSG_LINK, - "807x autoneg val = 0x%x\n", val); + DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); + /*val = ((1<<5)|(1<<7));*/ } bnx2x_cl45_write(bp, params->port, @@ -2677,19 +2554,20 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { - + /* Disable 2.5Ghz */ bnx2x_cl45_read(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, 0x8329, &tmp1); - - if (((params->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && - (params->req_line_speed == - SPEED_AUTO_NEG)) || - (params->req_line_speed == - SPEED_2500)) { +/* SUPPORT_SPEED_CAPABILITY + (Due to the nature of the link order, its not + possible to enable 2.5G within the autoneg + capabilities) + if (params->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) +*/ + if (params->req_line_speed == SPEED_2500) { u16 phy_ver; /* Allow 2.5G for A1 and above */ bnx2x_cl45_read(bp, params->port, @@ -2697,53 +2575,49 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) ext_phy_addr, MDIO_PMA_DEVAD, 0xc801, &phy_ver); - DP(NETIF_MSG_LINK, "Add 2.5G\n"); + if (phy_ver > 0) tmp1 |= 1; else tmp1 &= 0xfffe; - } else { - DP(NETIF_MSG_LINK, "Disable 2.5G\n"); + } + else tmp1 &= 0xfffe; - } - bnx2x_cl45_write(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, + bnx2x_cl45_write(bp, params->port, + ext_phy_type, + ext_phy_addr, + MDIO_AN_DEVAD, 0x8329, tmp1); } - - /* Add support for CL37 (passive mode) II */ - - bnx2x_cl45_read(bp, params->port, + /* Add support for CL37 (passive mode) I */ + bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LD, - &tmp1); - + MDIO_AN_REG_CL37_CL73, 0x040c); + /* Add support for CL37 (passive mode) II */ bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LD, (tmp1 | - ((params->req_duplex == DUPLEX_FULL) ? - 0x20 : 0x40))); - + MDIO_AN_REG_CL37_FD, 0x20); /* Add support for CL37 (passive mode) III */ bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + /* Restart autoneg */ + msleep(500); if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { - /* The SNR will improve about 2db by changing + + /* The SNR will improve about 2db by changing the BW and FEE main tap. 
Rest commands are executed after link is up*/ - /*Change FFE main cursor to 5 in EDC register*/ + /* Change FFE main cursor to 5 in EDC register */ if (bnx2x_8073_is_snr_needed(params)) bnx2x_cl45_write(bp, params->port, ext_phy_type, @@ -2752,28 +2626,25 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) MDIO_PMA_REG_EDC_FFE_MAIN, 0xFB0C); - /* Enable FEC (Forware Error Correction) - Request in the AN */ - bnx2x_cl45_read(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, - MDIO_AN_REG_ADV2, &tmp1); - - tmp1 |= (1<<15); + /* Enable FEC (Forware Error Correction) + Request in the AN */ + bnx2x_cl45_read(bp, params->port, + ext_phy_type, + ext_phy_addr, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV2, &tmp1); - bnx2x_cl45_write(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, - MDIO_AN_REG_ADV2, tmp1); + tmp1 |= (1<<15); + bnx2x_cl45_write(bp, params->port, + ext_phy_type, + ext_phy_addr, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV2, tmp1); } bnx2x_ext_phy_set_pause(params, vars); - /* Restart autoneg */ - msleep(500); bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr, @@ -2830,7 +2701,10 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) } } else { /* SerDes */ - +/* ext_phy_addr = ((bp->ext_phy_config & + PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >> + PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT); +*/ ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); switch (ext_phy_type) { case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: @@ -2852,7 +2726,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { struct bnx2x *bp = params->bp; u32 ext_phy_type; @@ -2893,8 +2767,6 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, MDIO_PMA_REG_RX_SD, &rx_sd); DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd); ext_phy_link_up = (rx_sd & 0x1); - if (ext_phy_link_up) - vars->line_speed = SPEED_10000; break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: @@ -2938,13 +2810,6 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, */ ext_phy_link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); - if (ext_phy_link_up) { - if (val2 & (1<<1)) - vars->line_speed = SPEED_1000; - else - vars->line_speed = SPEED_10000; - } - /* clear LASI indication*/ bnx2x_cl45_read(bp, params->port, ext_phy_type, ext_phy_addr, @@ -2955,8 +2820,6 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: { - u16 link_status = 0; - u16 an1000_status = 0; if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) { bnx2x_cl45_read(bp, params->port, @@ -2983,9 +2846,14 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); + bnx2x_cl45_read(bp, params->port, + ext_phy_type, + ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LASI_STATUS, &val2); DP(NETIF_MSG_LINK, - "8703 LASI status 0x%x\n", - val1); + "8703 LASI status 0x%x->0x%x\n", + val1, val2); } /* clear the interrupt LASI status register */ @@ -3001,23 +2869,20 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, MDIO_PCS_REG_STATUS, &val1); DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1); - /* Clear MSG-OUT */ + /* Check the LASI */ bnx2x_cl45_read(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, - 0xca13, - &val1); - - /* Check the LASI 
*/ + MDIO_PMA_REG_RX_ALARM, &val2); bnx2x_cl45_read(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM, &val2); - - DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); - + MDIO_PMA_REG_RX_ALARM, + &val1); + DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n", + val2, val1); /* Check the link status */ bnx2x_cl45_read(bp, params->port, ext_phy_type, @@ -3040,29 +2905,29 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { - + u16 an1000_status = 0; if (ext_phy_link_up && - ((params->req_line_speed != - SPEED_10000))) { + ( + (params->req_line_speed != SPEED_10000) + )) { if (bnx2x_bcm8073_xaui_wa(params) != 0) { ext_phy_link_up = 0; break; } - } - bnx2x_cl45_read(bp, params->port, + bnx2x_cl45_read(bp, params->port, ext_phy_type, ext_phy_addr, - MDIO_AN_DEVAD, + MDIO_XS_DEVAD, 0x8304, &an1000_status); - bnx2x_cl45_read(bp, params->port, + bnx2x_cl45_read(bp, params->port, ext_phy_type, ext_phy_addr, - MDIO_AN_DEVAD, + MDIO_XS_DEVAD, 0x8304, &an1000_status); - + } /* Check the link status on 1.1.2 */ bnx2x_cl45_read(bp, params->port, ext_phy_type, @@ -3078,8 +2943,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, "an_link_status=0x%x\n", val2, val1, an1000_status); - ext_phy_link_up = (((val1 & 4) == 4) || - (an1000_status & (1<<1))); + ext_phy_link_up = (((val1 & 4) == 4) || + (an1000_status & (1<<1))); if (ext_phy_link_up && bnx2x_8073_is_snr_needed(params)) { /* The SNR will improve about 2dbby @@ -3103,74 +2968,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, MDIO_PMA_REG_CDR_BANDWIDTH, 0x0333); - - } - bnx2x_cl45_read(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_PMA_DEVAD, - 0xc820, - &link_status); - - /* Bits 0..2 --> speed detected, - bits 13..15--> link is down */ - if ((link_status & (1<<2)) && - (!(link_status & (1<<15)))) { - ext_phy_link_up = 1; - vars->line_speed = SPEED_10000; - DP(NETIF_MSG_LINK, - "port %x: External link" - " up in 10G\n", params->port); - } else if ((link_status & (1<<1)) && - (!(link_status & (1<<14)))) { - ext_phy_link_up = 1; - vars->line_speed = SPEED_2500; - DP(NETIF_MSG_LINK, - "port %x: External link" - " up in 2.5G\n", params->port); - } else if ((link_status & (1<<0)) && - (!(link_status & (1<<13)))) { - ext_phy_link_up = 1; - vars->line_speed = SPEED_1000; - DP(NETIF_MSG_LINK, - "port %x: External link" - " up in 1G\n", params->port); - } else { - ext_phy_link_up = 0; - DP(NETIF_MSG_LINK, - "port %x: External link" - " is down\n", params->port); - } - } else { - /* See if 1G link is up for the 8072 */ - bnx2x_cl45_read(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, - 0x8304, - &an1000_status); - bnx2x_cl45_read(bp, params->port, - ext_phy_type, - ext_phy_addr, - MDIO_AN_DEVAD, - 0x8304, - &an1000_status); - if (an1000_status & (1<<1)) { - ext_phy_link_up = 1; - vars->line_speed = SPEED_1000; - DP(NETIF_MSG_LINK, - "port %x: External link" - " up in 1G\n", params->port); - } else if (ext_phy_link_up) { - ext_phy_link_up = 1; - vars->line_speed = SPEED_10000; - DP(NETIF_MSG_LINK, - "port %x: External link" - " up in 10G\n", params->port); } } - - break; } case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: @@ -3207,7 +3006,6 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, &val2); - vars->line_speed = SPEED_10000; DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", val2, @@ -3302,7 
+3100,7 @@ static void bnx2x_link_int_enable(struct link_params *params) * link management */ static void bnx2x_link_int_ack(struct link_params *params, - struct link_vars *vars, u8 is_10g) + struct link_vars *vars, u16 is_10g) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -3383,8 +3181,7 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len) } -static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr, - u32 ext_phy_type) +static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) { u32 cnt = 0; u16 ctrl = 0; @@ -3395,14 +3192,12 @@ static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr, /* take ext phy out of reset */ bnx2x_set_gpio(bp, - MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_HIGH, - port); + MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_HIGH); bnx2x_set_gpio(bp, - MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_HIGH, - port); + MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_HIGH); /* wait for 5ms */ msleep(5); @@ -3410,7 +3205,7 @@ static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr, for (cnt = 0; cnt < 1000; cnt++) { msleep(1); bnx2x_cl45_read(bp, port, - ext_phy_type, + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, @@ -3422,17 +3217,13 @@ static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr, } } -static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port) +static void bnx2x_turn_off_sf(struct bnx2x *bp) { /* put sf to reset */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW); bnx2x_set_gpio(bp, - MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_LOW, - port); - bnx2x_set_gpio(bp, - MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_LOW, - port); + MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_LOW); } u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, @@ -3462,8 +3253,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, /* Take ext phy out of reset */ if (!driver_loaded) - bnx2x_turn_on_ef(bp, params->port, ext_phy_addr, - ext_phy_type); + bnx2x_turn_on_sf(bp, params->port, ext_phy_addr); /* wait for 1ms */ msleep(1); @@ -3486,16 +3276,11 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, version[4] = '\0'; if (!driver_loaded) - bnx2x_turn_off_sf(bp, params->port); + bnx2x_turn_off_sf(bp); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: { - /* Take ext phy out of reset */ - if (!driver_loaded) - bnx2x_turn_on_ef(bp, params->port, ext_phy_addr, - ext_phy_type); - bnx2x_cl45_read(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, @@ -3548,7 +3333,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params, struct bnx2x *bp = params->bp; if (is_10g) { - u32 md_devad; + u32 md_devad; DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); @@ -3768,8 +3553,6 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, u16 hw_led_mode, u32 chip_id) { u8 rc = 0; - u32 tmp; - u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", speed, hw_led_mode); @@ -3778,9 +3561,6 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1); - - tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); break; case LED_MODE_OPER: @@ -3792,10 +3572,6 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, LED_BLINK_RATE_VAL); REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1); - tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, - (tmp & (~EMAC_LED_OVERRIDE))); - if (!CHIP_IS_E1H(bp) && ((speed == SPEED_2500) || (speed == SPEED_1000) || @@ -3846,8 +3622,7 @@ static u8 bnx2x_link_initialize(struct link_params *params, struct bnx2x *bp = params->bp; u8 port = params->port; u8 rc = 0; - u8 non_ext_phy; - u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); + /* Activate the external PHY */ bnx2x_ext_phy_reset(params, vars); @@ -3869,6 +3644,10 @@ static u8 bnx2x_link_initialize(struct link_params *params, bnx2x_set_swap_lanes(params); } + /* Set Parallel Detect */ + if (params->req_line_speed == SPEED_AUTO_NEG) + bnx2x_set_parallel_detection(params, vars->phy_flags); + if (vars->phy_flags & PHY_XGXS_FLAG) { if (params->req_line_speed && ((params->req_line_speed == SPEED_100) || @@ -3878,33 +3657,68 @@ static u8 bnx2x_link_initialize(struct link_params *params, vars->phy_flags &= ~PHY_SGMII_FLAG; } } - /* In case of external phy existance, the line speed would be the - line speed linked up by the external phy. In case it is direct only, - then the line_speed during initialization will be equal to the - req_line_speed*/ - vars->line_speed = params->req_line_speed; - bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc); + if (!(vars->phy_flags & PHY_SGMII_FLAG)) { + u16 bank, rx_eq; - /* init ext phy and enable link state int */ - non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) || - (params->loopback_mode == LOOPBACK_XGXS_10) || - (params->loopback_mode == LOOPBACK_EXT_PHY)); - - if (non_ext_phy || - (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) { - if (params->req_line_speed == SPEED_AUTO_NEG) - bnx2x_set_parallel_detection(params, vars->phy_flags); - bnx2x_init_internal_phy(params, vars); + rx_eq = ((params->serdes_config & + PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >> + PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT); + + DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq); + for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL; + bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) { + CL45_WR_OVER_CL22(bp, port, + params->phy_addr, + bank , + MDIO_RX0_RX_EQ_BOOST, + ((rx_eq & + MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) | + MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL)); + } + + /* forced speed requested? 
*/ + if (params->req_line_speed != SPEED_AUTO_NEG) { + DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); + + /* disable autoneg */ + bnx2x_set_autoneg(params, vars); + + /* program speed and duplex */ + bnx2x_program_serdes(params); + vars->ieee_fc = + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; + + } else { /* AN_mode */ + DP(NETIF_MSG_LINK, "not SGMII, AN\n"); + + /* AN enabled */ + bnx2x_set_brcm_cl37_advertisment(params); + + /* program duplex & pause advertisement (for aneg) */ + bnx2x_set_ieee_aneg_advertisment(params, + &vars->ieee_fc); + + /* enable autoneg */ + bnx2x_set_autoneg(params, vars); + + /* enable and restart AN */ + bnx2x_restart_autoneg(params); + } + + } else { /* SGMII mode */ + DP(NETIF_MSG_LINK, "SGMII\n"); + + bnx2x_initialize_sgmii_process(params); } - if (!non_ext_phy) - rc |= bnx2x_ext_phy_init(params, vars); + /* init ext phy and enable link state int */ + rc |= bnx2x_ext_phy_init(params, vars); bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - (NIG_STATUS_XGXS0_LINK10G | - NIG_STATUS_XGXS0_LINK_STATUS | - NIG_STATUS_SERDES0_LINK_STATUS)); + (NIG_STATUS_XGXS0_LINK10G | + NIG_STATUS_XGXS0_LINK_STATUS | + NIG_STATUS_SERDES0_LINK_STATUS)); return rc; @@ -3916,23 +3730,15 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) struct bnx2x *bp = params->bp; u32 val; - DP(NETIF_MSG_LINK, "Phy Initialization started \n"); + DP(NETIF_MSG_LINK, "Phy Initialization started\n"); DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n", params->req_line_speed, params->req_flow_ctrl); vars->link_status = 0; - vars->phy_link_up = 0; - vars->link_up = 0; - vars->line_speed = 0; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_NONE; - if (params->switch_cfg == SWITCH_CFG_1G) vars->phy_flags = PHY_SERDES_FLAG; else vars->phy_flags = PHY_XGXS_FLAG; - /* disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, (NIG_MASK_XGXS0_LINK_STATUS | @@ -4088,7 +3894,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) } bnx2x_link_initialize(params, vars); - msleep(30); bnx2x_link_int_enable(params); } return 0; @@ -4138,22 +3943,39 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars) /* HW reset */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); + MISC_REGISTERS_GPIO_OUTPUT_LOW); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); + MISC_REGISTERS_GPIO_OUTPUT_LOW); DP(NETIF_MSG_LINK, "reset external PHY\n"); - } else if (ext_phy_type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { - DP(NETIF_MSG_LINK, "Setting 8073 port %d into " + } else { + + u8 ext_phy_addr = ((ext_phy_config & + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); + + /* SW reset */ + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1<<15); + + /* Set Low Power Mode */ + bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1<<11); + + + if (ext_phy_type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { + DP(NETIF_MSG_LINK, "Setting 8073 port %d into" "low power mode\n", port); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); + MISC_REGISTERS_GPIO_OUTPUT_LOW); + } } } /* reset the SerDes/XGXS */ @@ -4173,73 +3995,6 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars) return 0; } -static u8 bnx2x_update_link_down(struct link_params *params, - struct 
link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); - bnx2x_set_led(bp, port, LED_MODE_OFF, - 0, params->hw_led_mode, - params->chip_id); - - /* indicate no mac active */ - vars->mac_type = MAC_TYPE_NONE; - - /* update shared memory */ - vars->link_status = 0; - vars->line_speed = 0; - bnx2x_update_mng(params, vars->link_status); - - /* activate nig drain */ - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - - /* reset BigMac */ - bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + - MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - return 0; -} - -static u8 bnx2x_update_link_up(struct link_params *params, - struct link_vars *vars, - u8 link_10g, u32 gp_status) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u8 rc = 0; - vars->link_status |= LINK_STATUS_LINK_UP; - if (link_10g) { - bnx2x_bmac_enable(params, vars, 0); - bnx2x_set_led(bp, port, LED_MODE_OPER, - SPEED_10000, params->hw_led_mode, - params->chip_id); - - } else { - bnx2x_emac_enable(params, vars, 0); - rc = bnx2x_emac_program(params, vars->line_speed, - vars->duplex); - - /* AN complete? */ - if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { - if (!(vars->phy_flags & - PHY_SGMII_FLAG)) - bnx2x_set_sgmii_tx_driver(params); - } - } - - /* PBF - link up */ - rc |= bnx2x_pbf_update(params, vars->flow_ctrl, - vars->line_speed); - - /* disable drain */ - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); - - /* update shared memory */ - bnx2x_update_mng(params, vars->link_status); - return rc; -} /* This function should called upon link interrupt */ /* In case vars->link_up, driver needs to 1. Update the pbf @@ -4257,10 +4012,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 port = params->port; + u16 i; u16 gp_status; - u8 link_10g; - u8 ext_phy_link_up, rc = 0; - u32 ext_phy_type; + u16 link_10g; + u8 rc = 0; DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", port, @@ -4276,16 +4031,15 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); - ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); - - /* Check external link change only for non-direct */ - ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars); - /* Read gp_status */ - CL45_RD_OVER_CL22(bp, port, params->phy_addr, - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); + /* avoid fast toggling */ + for (i = 0; i < 10; i++) { + msleep(10); + CL45_RD_OVER_CL22(bp, port, params->phy_addr, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + } rc = bnx2x_link_settings_status(params, vars, gp_status); if (rc != 0) @@ -4301,177 +4055,73 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) bnx2x_link_int_ack(params, vars, link_10g); - /* In case external phy link is up, and internal link is down - ( not initialized yet probably after link initialization, it needs - to be initialized. 
- Note that after link down-up as result of cable plug, - the xgxs link would probably become up again without the need to - initialize it*/ - - if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && - (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) && - (ext_phy_link_up && !vars->phy_link_up)) - bnx2x_init_internal_phy(params, vars); - /* link is up only if both local phy and external phy are up */ - vars->link_up = (ext_phy_link_up && vars->phy_link_up); + vars->link_up = (vars->phy_link_up && + bnx2x_ext_phy_is_link_up(params, vars)); - if (vars->link_up) - rc = bnx2x_update_link_up(params, vars, link_10g, gp_status); - else - rc = bnx2x_update_link_down(params, vars); - - return rc; -} - -static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base) -{ - u8 ext_phy_addr[PORT_MAX]; - u16 val; - s8 port; - - /* PART1 - Reset both phys */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - /* Extract the ext phy address for the port */ - u32 ext_phy_config = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].external_phy_config)); - - /* disable attentions */ - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - - ext_phy_addr[port] = - ((ext_phy_config & - PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> - PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); - - /* Need to take the phy out of low power mode in order - to write to access its registers */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - - /* Reset the phy */ - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, - 1<<15); + if (!vars->phy_link_up && + REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) { + bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */ } - /* Add delay of 150ms after reset */ - msleep(150); - - /* PART2 - Download firmware to both phys */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - u16 fw_ver1; + if (vars->link_up) { + vars->link_status |= LINK_STATUS_LINK_UP; + if (link_10g) { + bnx2x_bmac_enable(params, vars, 0); + bnx2x_set_led(bp, port, LED_MODE_OPER, + SPEED_10000, params->hw_led_mode, + params->chip_id); - bnx2x_bcm8073_external_rom_boot(bp, port, - ext_phy_addr[port]); + } else { + bnx2x_emac_enable(params, vars, 0); + rc = bnx2x_emac_program(params, vars->line_speed, + vars->duplex); - bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER1, &fw_ver1); - if (fw_ver1 == 0) { - DP(NETIF_MSG_LINK, - "bnx2x_8073_common_init_phy port %x " - "fw Download failed\n", port); - return -EINVAL; + /* AN complete? 
*/ + if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { + if (!(vars->phy_flags & + PHY_SGMII_FLAG)) + bnx2x_set_sgmii_tx_driver(params); + } } - /* Only set bit 10 = 1 (Tx power down) */ - bnx2x_cl45_read(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); - - /* Phase1 of TX_POWER_DOWN reset */ - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, - (val | 1<<10)); - } - - /* Toggle Transmitter: Power down and then up with 600ms - delay between */ - msleep(600); - - /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - /* Phase2 of POWER_DOWN_RESET*/ - /* Release bit 10 (Release Tx power down) */ - bnx2x_cl45_read(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); + /* PBF - link up */ + rc |= bnx2x_pbf_update(params, vars->flow_ctrl, + vars->line_speed); - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); - msleep(15); + /* disable drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); - /* Read modify write the SPI-ROM version select register */ - bnx2x_cl45_read(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, &val); - bnx2x_cl45_write(bp, port, - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - ext_phy_addr[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); + /* update shared memory */ + bnx2x_update_mng(params, vars->link_status); - /* set GPIO2 back to LOW */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - } - return 0; + } else { /* link down */ + DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port); + bnx2x_set_led(bp, port, LED_MODE_OFF, + 0, params->hw_led_mode, + params->chip_id); -} + /* indicate no mac active */ + vars->mac_type = MAC_TYPE_NONE; -u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base) -{ - u8 rc = 0; - u32 ext_phy_type; + /* update shared memory */ + vars->link_status = 0; + bnx2x_update_mng(params, vars->link_status); - DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n"); + /* activate nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - /* Read the ext_phy_type for arbitrary port(0) */ - ext_phy_type = XGXS_EXT_PHY_TYPE( - REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[0].external_phy_config))); + /* reset BigMac */ + bnx2x_bmac_rx_disable(bp, params->port); + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - switch (ext_phy_type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: - { - rc = bnx2x_8073_common_init_phy(bp, shmem_base); - break; - } - default: - DP(NETIF_MSG_LINK, - "bnx2x_common_init_phy: ext_phy 0x%x not required\n", - ext_phy_type); - break; } return rc; } - - static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) { u16 val, cnt; @@ -4504,7 +4154,7 @@ static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) } #define RESERVED_SIZE 256 /* max application is 160K bytes - data at end of RAM */ -#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE) +#define MAX_APP_SIZE 160*1024 - RESERVED_SIZE /* Header is 14 bytes */ #define HEADER_SIZE 14 @@ 
-4542,12 +4192,12 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, size = MAX_APP_SIZE+HEADER_SIZE; } DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]); - DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]); + DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]); /* Put the DSP in download mode by setting FLASH_CFG[2] to 1 and issuing a reset.*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, port); + MISC_REGISTERS_GPIO_HIGH); bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); @@ -4779,8 +4429,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, } /* DSP Remove Download Mode */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_LOW, port); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW); bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); @@ -4788,7 +4437,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, for (cnt = 0; cnt < 100; cnt++) msleep(5); - bnx2x_hw_reset(bp, port); + bnx2x_hw_reset(bp); for (cnt = 0; cnt < 100; cnt++) msleep(5); @@ -4824,7 +4473,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, MDIO_PMA_REG_7101_VER2, &image_revision2); - if (data[0x14e] != (image_revision2&0xFF) || + if (data[0x14e] != (image_revision2&0xFF) || data[0x14f] != ((image_revision2&0xFF00)>>8) || data[0x150] != (image_revision1&0xFF) || data[0x151] != ((image_revision1&0xFF00)>>8)) { @@ -4859,11 +4508,11 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: /* Take ext phy out of reset */ if (!driver_loaded) - bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type); + bnx2x_turn_on_sf(bp, port, ext_phy_addr); rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr, data, size); if (!driver_loaded) - bnx2x_turn_off_sf(bp, port); + bnx2x_turn_off_sf(bp); break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: diff --git a/trunk/drivers/net/bnx2x_link.h b/trunk/drivers/net/bnx2x_link.h index 86d54a17b411..714d37ac95de 100644 --- a/trunk/drivers/net/bnx2x_link.h +++ b/trunk/drivers/net/bnx2x_link.h @@ -55,17 +55,14 @@ struct link_params { #define LOOPBACK_BMAC 2 #define LOOPBACK_XGXS_10 3 #define LOOPBACK_EXT_PHY 4 -#define LOOPBACK_EXT 5 u16 req_duplex; u16 req_flow_ctrl; - u16 req_fc_auto_adv; /* Should be set to TX / BOTH when - req_flow_ctrl is set to AUTO */ u16 req_line_speed; /* Also determine AutoNeg */ /* Device parameters */ u8 mac_addr[6]; - + u16 mtu; /* shmem parameters */ @@ -143,7 +140,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type, u8 phy_addr, u8 devad, u16 reg, u16 val); /* Reads the link_status from the shmem, - and update the link vars accordingly */ + and update the link vars accordinaly */ void bnx2x_link_status_update(struct link_params *input, struct link_vars *output); /* returns string representing the fw_version of the external phy */ @@ -152,7 +149,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, /* Set/Unset the led Basically, the CLC takes care of the led for the link, but in case one needs - to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to + to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to blink the led, and LED_MODE_OFF to set the led off.*/ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, u16 hw_led_mode, u32 chip_id); @@ -167,7 +164,5 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 
ext_phy_config, otherwise link is down*/ u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars); -/* One-time initialization for external phy after power up */ -u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base); #endif /* BNX2X_LINK_H */ diff --git a/trunk/drivers/net/bnx2x_main.c b/trunk/drivers/net/bnx2x_main.c index 971576b43687..272a4bd25953 100644 --- a/trunk/drivers/net/bnx2x_main.c +++ b/trunk/drivers/net/bnx2x_main.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -59,8 +60,8 @@ #include "bnx2x.h" #include "bnx2x_init.h" -#define DRV_MODULE_VERSION "1.45.17" -#define DRV_MODULE_RELDATE "2008/08/13" +#define DRV_MODULE_VERSION "1.45.6" +#define DRV_MODULE_RELDATE "2008/06/23" #define BNX2X_BC_VER 0x040200 /* Time in jiffies before concluding the transmitter is hung */ @@ -75,21 +76,23 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -static int disable_tpa; static int use_inta; static int poll; static int debug; +static int disable_tpa; +static int nomcp; static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ static int use_multi; -module_param(disable_tpa, int, 0); module_param(use_inta, int, 0); module_param(poll, int, 0); module_param(debug, int, 0); -MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature"); +module_param(disable_tpa, int, 0); +module_param(nomcp, int, 0); MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); MODULE_PARM_DESC(poll, "use polling (for debug)"); MODULE_PARM_DESC(debug, "default debug msglevel"); +MODULE_PARM_DESC(nomcp, "ignore management CPU"); #ifdef BNX2X_MULTI module_param(use_multi, int, 0); @@ -234,16 +237,17 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, while (*wb_comp != DMAE_COMP_VAL) { DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); - if (!cnt) { - BNX2X_ERR("dmae timeout!\n"); - break; - } - cnt--; /* adjust delay for emulation/FPGA */ if (CHIP_REV_IS_SLOW(bp)) msleep(100); else udelay(5); + + if (!cnt) { + BNX2X_ERR("dmae timeout!\n"); + break; + } + cnt--; } mutex_unlock(&bp->dmae_mutex); @@ -306,16 +310,17 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) while (*wb_comp != DMAE_COMP_VAL) { - if (!cnt) { - BNX2X_ERR("dmae timeout!\n"); - break; - } - cnt--; /* adjust delay for emulation/FPGA */ if (CHIP_REV_IS_SLOW(bp)) msleep(100); else udelay(5); + + if (!cnt) { + BNX2X_ERR("dmae timeout!\n"); + break; + } + cnt--; } DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], @@ -498,9 +503,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp) int i; u16 j, start, end; - bp->stats_state = STATS_STATE_DISABLED; - DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); - BNX2X_ERR("begin crash dump -----------------\n"); for_each_queue(bp, i) { @@ -511,20 +513,17 @@ static void bnx2x_panic_dump(struct bnx2x *bp) " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); - BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)" - " *rx_bd_cons_sb(%x) rx_comp_prod(%x)" - " rx_comp_cons(%x) *rx_cons_sb(%x)\n", - fp->rx_bd_prod, fp->rx_bd_cons, - le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod, - fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); - BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)" - " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)" - " *sb_u_idx(%x) bd data(%x,%x)\n", - fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx, - 
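In the bnx2x_main.c hunks above, the DMAE completion loops are rearranged so the per-iteration delay happens before the timeout counter is checked. The sketch below shows the general shape of such a bounded wait on a DMA-written completion word, in that same order; COMP_VAL and delay() are placeholders, not the driver's constants.

	#include <stdint.h>
	#include <stdio.h>

	#define COMP_VAL 0xd0d0d0d0u	/* hypothetical completion magic */

	static void delay(void) { /* udelay(5) / msleep(100) stand-in */ }

	/* Bounded wait on a completion word the DMA engine writes when done. */
	static int wait_dmae_done(volatile uint32_t *wb_comp, int cnt)
	{
		while (*wb_comp != COMP_VAL) {
			delay();		/* wait first ... */
			if (!cnt)
				return -1;	/* ... then decide if we timed out */
			cnt--;
		}
		return 0;
	}

	int main(void)
	{
		uint32_t comp = COMP_VAL;	/* pretend the engine already finished */

		printf("dmae: %d\n", wait_dmae_done(&comp, 200));
		return 0;
	}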
fp->status_blk->c_status_block.status_block_index, - fp->fp_u_idx, - fp->status_blk->u_status_block.status_block_index, - hw_prods->packets_prod, hw_prods->bds_prod); + BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" + " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)" + " rx_sge_prod(%x) last_max_sge(%x)\n", + fp->rx_comp_prod, fp->rx_comp_cons, + le16_to_cpu(*fp->rx_cons_sb), + le16_to_cpu(*fp->rx_bd_cons_sb), + fp->rx_sge_prod, fp->last_max_sge); + BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" + " bd data(%x,%x) rx_alloc_failed(%lx)\n", + fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, + hw_prods->bds_prod, fp->rx_alloc_failed); start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); @@ -554,8 +553,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp) j, rx_bd[1], rx_bd[0], sw_bd->skb); } - start = RX_SGE(fp->rx_sge_prod); - end = RX_SGE(fp->last_max_sge); + start = 0; + end = RX_SGE_CNT*NUM_RX_SGE_PAGES; for (j = start; j < end; j++) { u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; @@ -583,6 +582,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp) bnx2x_fw_dump(bp); bnx2x_mc_assert(bp); BNX2X_ERR("end crash dump -----------------\n"); + + bp->stats_state = STATS_STATE_DISABLED; + DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); } static void bnx2x_int_enable(struct bnx2x *bp) @@ -682,8 +684,7 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp) static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, u8 storm, u16 index, u8 op, u8 update) { - u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + - COMMAND_REG_INT_ACK); + u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; struct igu_ack_register igu_ack; igu_ack.status_block_index = index; @@ -693,9 +694,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); - DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", - (*(u32 *)&igu_ack), hc_addr); - REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n", + (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); + REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); } static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) @@ -715,15 +716,36 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) return rc; } +static inline int bnx2x_has_work(struct bnx2x_fastpath *fp) +{ + u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); + + if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + rx_cons_sb++; + + if ((fp->rx_comp_cons != rx_cons_sb) || + (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || + (fp->tx_pkt_prod != fp->tx_pkt_cons)) + return 1; + + return 0; +} + static u16 bnx2x_ack_int(struct bnx2x *bp) { - u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + - COMMAND_REG_SIMD_MASK); - u32 result = REG_RD(bp, hc_addr); + u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; + u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); - DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", - result, hc_addr); + DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n", + result, BAR_IGU_INTMEM + igu_addr); +#ifdef IGU_DEBUG +#warning IGU_DEBUG active + if (result == 0) { + BNX2X_ERR("read %x from IGU\n", result); + REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0); + } +#endif return result; } @@ -876,7 +898,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) netif_tx_lock(bp->dev); if (netif_queue_stopped(bp->dev) && - 
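The interrupt-acknowledge hunk above moves bnx2x_ack_sb from an HC command register to an IGU address, but in both variants the acknowledge is a single 32-bit write of the packed igu_ack_register. The sketch below illustrates that pack-and-post pattern; the field shifts and the address are illustrative assumptions, not the device's actual layout.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative field positions; not the device's real layout. */
	#define ACK_SB_ID_SHIFT		0
	#define ACK_STORM_ID_SHIFT	5
	#define ACK_UPDATE_IDX_SHIFT	7
	#define ACK_INT_MODE_SHIFT	8
	#define ACK_INDEX_SHIFT		16

	/* Stand-in for REG_WR(): the real thing posts to a BAR-mapped address. */
	static void reg_wr(uint32_t addr, uint32_t val)
	{
		printf("write 0x%08x to 0x%08x\n", val, addr);
	}

	/* Acknowledge a status block with one packed 32-bit write. */
	static void ack_sb(uint32_t ack_addr, uint8_t sb_id, uint8_t storm,
			   uint16_t index, uint8_t int_mode, uint8_t update)
	{
		uint32_t cmd = ((uint32_t)sb_id    << ACK_SB_ID_SHIFT)     |
			       ((uint32_t)storm    << ACK_STORM_ID_SHIFT)   |
			       ((uint32_t)update   << ACK_UPDATE_IDX_SHIFT) |
			       ((uint32_t)int_mode << ACK_INT_MODE_SHIFT)   |
			       ((uint32_t)index    << ACK_INDEX_SHIFT);

		reg_wr(ack_addr, cmd);
	}

	int main(void)
	{
		ack_sb(0x1000, 3, 1, 42, 0, 1);
		return 0;
	}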
(bp->state == BNX2X_STATE_OPEN) && (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) netif_wake_queue(bp->dev); @@ -884,7 +905,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) } } - static void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) { @@ -940,7 +960,6 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; break; - case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); @@ -1150,8 +1169,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) memset(fp->sge_mask, 0xff, (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); - /* Clear the two last indices in the page to 1: - these are the indices that correspond to the "next" element, + /* Clear the two last indeces in the page to 1: + these are the indeces that correspond to the "next" element, hence will never be indicated and should be removed from the calculations. */ bnx2x_clear_sge_mask_next_elems(fp); @@ -1242,7 +1261,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, where we are and drop the whole packet */ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); if (unlikely(err)) { - bp->eth_stats.rx_skb_alloc_failed++; + fp->rx_alloc_failed++; return err; } @@ -1278,13 +1297,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + /* if alloc failed drop the packet and keep the buffer in the bin */ if (likely(new_skb)) { - /* fix ip xsum and give it to the stack */ - /* (no need to map the new skb) */ prefetch(skb); prefetch(((char *)(skb)) + 128); + /* else fix ip xsum and give it to the stack */ + /* (no need to map the new skb) */ #ifdef BNX2X_STOP_ON_ERROR if (pad + len > bp->rx_buf_size) { BNX2X_ERR("skb_put is about to fail... " @@ -1333,10 +1353,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, fp->tpa_pool[queue].skb = new_skb; } else { - /* else drop the packet and keep the buffer in the bin */ DP(NETIF_MSG_RX_STATUS, "Failed to allocate new skb - dropping packet!\n"); - bp->eth_stats.rx_skb_alloc_failed++; + fp->rx_alloc_failed++; } fp->tpa_state[queue] = BNX2X_TPA_STOP; @@ -1371,6 +1390,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; int rx_pkt = 0; + u16 queue; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -1436,7 +1456,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if ((!fp->disable_tpa) && (TPA_TYPE(cqe_fp_flags) != (TPA_TYPE_START | TPA_TYPE_END))) { - u16 queue = cqe->fast_path_cqe.queue_index; + queue = cqe->fast_path_cqe.queue_index; if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { DP(NETIF_MSG_RX_STATUS, @@ -1483,10 +1503,11 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) /* is this an error packet? */ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + /* do we sometimes forward error packets anyway? 
*/ DP(NETIF_MSG_RX_ERR, "ERROR flags %x rx packet %u\n", cqe_fp_flags, sw_comp_cons); - bp->eth_stats.rx_err_discard_pkt++; + /* TBD make sure MC counts this as a drop */ goto reuse_rx; } @@ -1503,7 +1524,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) DP(NETIF_MSG_RX_ERR, "ERROR packet dropped " "because of alloc failure\n"); - bp->eth_stats.rx_skb_alloc_failed++; + fp->rx_alloc_failed++; goto reuse_rx; } @@ -1529,7 +1550,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) DP(NETIF_MSG_RX_ERR, "ERROR packet dropped because " "of alloc failure\n"); - bp->eth_stats.rx_skb_alloc_failed++; + fp->rx_alloc_failed++; reuse_rx: bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); goto next_rx; @@ -1538,12 +1559,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) skb->protocol = eth_type_trans(skb, bp->dev); skb->ip_summed = CHECKSUM_NONE; - if (bp->rx_csum) { - if (likely(BNX2X_RX_CSUM_OK(cqe))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - bp->eth_stats.hw_csum_err++; - } + if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* TBD do we pass bad csum packets in promisc */ } #ifdef BCM_VLAN @@ -1596,12 +1615,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) struct net_device *dev = bp->dev; int index = FP_IDX(fp); - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return IRQ_HANDLED; - } - DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", index, FP_SB_ID(fp)); bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0); @@ -1635,17 +1648,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) } DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status); +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + /* Return here if interrupt is disabled */ if (unlikely(atomic_read(&bp->intr_sem) != 0)) { DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); return IRQ_HANDLED; } -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return IRQ_HANDLED; -#endif - mask = 0x2 << bp->fp[0].sb_id; if (status & mask) { struct bnx2x_fastpath *fp = &bp->fp[0]; @@ -1686,12 +1699,11 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); * General service functions */ -static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) +static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) { u32 lock_status; u32 resource_bit = (1 << resource); - int func = BP_FUNC(bp); - u32 hw_lock_control_reg; + u8 port = BP_PORT(bp); int cnt; /* Validating that the resource is within range */ @@ -1702,15 +1714,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) return -EINVAL; } - if (func <= 5) { - hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); - } else { - hw_lock_control_reg = - (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); - } - /* Validating that the resource is not already taken */ - lock_status = REG_RD(bp, hw_lock_control_reg); + lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); if (lock_status & resource_bit) { DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", lock_status, resource_bit); @@ -1720,8 +1725,9 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) /* Try for 1 second every 5ms */ for (cnt = 0; cnt < 200; cnt++) { /* Try to acquire the lock */ - REG_WR(bp, hw_lock_control_reg + 4, resource_bit); - lock_status = REG_RD(bp, 
hw_lock_control_reg); + REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4, + resource_bit); + lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); if (lock_status & resource_bit) return 0; @@ -1731,12 +1737,11 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) return -EAGAIN; } -static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) +static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) { u32 lock_status; u32 resource_bit = (1 << resource); - int func = BP_FUNC(bp); - u32 hw_lock_control_reg; + u8 port = BP_PORT(bp); /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { @@ -1746,27 +1751,20 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) return -EINVAL; } - if (func <= 5) { - hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); - } else { - hw_lock_control_reg = - (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); - } - /* Validating that the resource is currently taken */ - lock_status = REG_RD(bp, hw_lock_control_reg); + lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); if (!(lock_status & resource_bit)) { DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", lock_status, resource_bit); return -EFAULT; } - REG_WR(bp, hw_lock_control_reg, resource_bit); + REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit); return 0; } /* HW Lock for shared dual port PHYs */ -static void bnx2x_acquire_phy_lock(struct bnx2x *bp) +static void bnx2x_phy_hw_lock(struct bnx2x *bp) { u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); @@ -1774,25 +1772,25 @@ static void bnx2x_acquire_phy_lock(struct bnx2x *bp) if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); + bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); } -static void bnx2x_release_phy_lock(struct bnx2x *bp) +static void bnx2x_phy_hw_unlock(struct bnx2x *bp) { u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); + bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); mutex_unlock(&bp->port.phy_mutex); } -int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && - REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp); int gpio_shift = gpio_num + (gpio_port ? 
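Both versions of the hw-lock code above follow the same acquisition strategy: validate the resource, make sure it is not already held, then retry the set-and-read-back for roughly one second (200 tries, 5 ms apart) before giving up with -EAGAIN. A self-contained sketch of that strategy, with the lock register modelled by a local variable that always grants the request:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * The lock register is modelled by a plain variable here, so the request
	 * is always granted; real hardware only latches the bit for one owner.
	 */
	static uint32_t lock_status;

	static uint32_t reg_rd(void)		{ return lock_status; }
	static void reg_wr_try(uint32_t bit)	{ lock_status |= bit; }
	static void sleep_ms(unsigned int ms)	{ (void)ms; }

	static int hw_lock_acquire(unsigned int resource)
	{
		uint32_t resource_bit = 1u << resource;
		int cnt;

		if (resource > 31)
			return -1;		/* out of range */
		if (reg_rd() & resource_bit)
			return -1;		/* already taken */

		/* Try for about 1 second, every 5 ms */
		for (cnt = 0; cnt < 200; cnt++) {
			reg_wr_try(resource_bit);	/* request the lock */
			if (reg_rd() & resource_bit)
				return 0;		/* granted */
			sleep_ms(5);
		}
		return -1;				/* would be -EAGAIN in the kernel */
	}

	int main(void)
	{
		printf("lock: %d\n", hw_lock_acquire(3));
		return 0;
	}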
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); u32 gpio_mask = (1 << gpio_shift); @@ -1803,7 +1801,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) return -EINVAL; } - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); @@ -1824,7 +1822,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); break; - case MISC_REGISTERS_GPIO_INPUT_HI_Z: + case MISC_REGISTERS_GPIO_INPUT_HI_Z : DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ @@ -1836,7 +1834,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) } REG_WR(bp, MISC_REG_GPIO, gpio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO); return 0; } @@ -1852,19 +1850,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) return -EINVAL; } - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); /* read SPIO and mask except the float bits */ spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); switch (mode) { - case MISC_REGISTERS_SPIO_OUTPUT_LOW: + case MISC_REGISTERS_SPIO_OUTPUT_LOW : DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); /* clear FLOAT and set CLR */ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); break; - case MISC_REGISTERS_SPIO_OUTPUT_HIGH: + case MISC_REGISTERS_SPIO_OUTPUT_HIGH : DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); /* clear FLOAT and set SET */ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); @@ -1882,7 +1880,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) } REG_WR(bp, MISC_REG_SPIO, spio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO); return 0; } @@ -1942,63 +1940,46 @@ static void bnx2x_link_report(struct bnx2x *bp) static u8 bnx2x_initial_phy_init(struct bnx2x *bp) { - if (!BP_NOMCP(bp)) { - u8 rc; + u8 rc; - /* Initialize link parameters structure variables */ - /* It is recommended to turn off RX FC for jumbo frames - for better performance */ - if (IS_E1HMF(bp)) - bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH; - else if (bp->dev->mtu > 5000) - bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX; - else - bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH; + /* Initialize link parameters structure variables */ + bp->link_params.mtu = bp->dev->mtu; - bnx2x_acquire_phy_lock(bp); - rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_lock(bp); + rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); + bnx2x_phy_hw_unlock(bp); - if (bp->link_vars.link_up) - bnx2x_link_report(bp); + if (bp->link_vars.link_up) + bnx2x_link_report(bp); - bnx2x_calc_fc_adv(bp); + bnx2x_calc_fc_adv(bp); - return rc; - } - BNX2X_ERR("Bootcode is missing -not initializing link\n"); - return -EINVAL; + return rc; } static void bnx2x_link_set(struct bnx2x *bp) { - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_phy_init(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_lock(bp); + bnx2x_phy_init(&bp->link_params, &bp->link_vars); + bnx2x_phy_hw_unlock(bp); - bnx2x_calc_fc_adv(bp); - } else - BNX2X_ERR("Bootcode is missing -not setting link\n"); + 
bnx2x_calc_fc_adv(bp); } static void bnx2x__link_reset(struct bnx2x *bp) { - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); - } else - BNX2X_ERR("Bootcode is missing -not resetting link\n"); + bnx2x_phy_hw_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars); + bnx2x_phy_hw_unlock(bp); } static u8 bnx2x_link_test(struct bnx2x *bp) { u8 rc; - bnx2x_acquire_phy_lock(bp); + bnx2x_phy_hw_lock(bp); rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_unlock(bp); return rc; } @@ -2010,7 +1991,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp) sum of vn_min_rates or 0 - if all the min_rates are 0. - In the later case fairness algorithm should be deactivated. + In the later case fainess algorithm should be deactivated. If not all min_rates are zero then those that are zeroes will be set to 1. */ @@ -2133,7 +2114,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func, FUNC_MF_CFG_MIN_BW_SHIFT) * 100; /* If FAIRNESS is enabled (not all min rates are zeroes) and if current min rate is zero - set it to 1. - This is a requirement of the algorithm. */ + This is a requirment of the algorithm. */ if ((vn_min_rate == 0) && wsum) vn_min_rate = DEF_MIN_RATE; vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> @@ -2222,9 +2203,9 @@ static void bnx2x_link_attn(struct bnx2x *bp) /* Make sure that we are synced with the current statistics */ bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_acquire_phy_lock(bp); + bnx2x_phy_hw_lock(bp); bnx2x_link_update(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_unlock(bp); if (bp->link_vars.link_up) { @@ -2376,7 +2357,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, } /* acquire split MCP access lock register */ -static int bnx2x_acquire_alr(struct bnx2x *bp) +static int bnx2x_lock_alr(struct bnx2x *bp) { u32 i, j, val; int rc = 0; @@ -2393,15 +2374,15 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) msleep(5); } if (!(val & (1L << 31))) { - BNX2X_ERR("Cannot acquire MCP access lock register\n"); + BNX2X_ERR("Cannot acquire nvram interface\n"); rc = -EBUSY; } return rc; } -/* release split MCP access lock register */ -static void bnx2x_release_alr(struct bnx2x *bp) +/* Release split MCP access lock register */ +static void bnx2x_unlock_alr(struct bnx2x *bp) { u32 val = 0; @@ -2414,6 +2395,7 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) u16 rc = 0; barrier(); /* status block is written to by the chip */ + if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; rc |= 1; @@ -2444,31 +2426,26 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) { int port = BP_PORT(bp); - u32 hc_addr = (HC_REG_COMMAND_REG + port*32 + - COMMAND_REG_ATTN_BITS_SET); + int func = BP_FUNC(bp); + u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8; u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; u32 nig_int_mask_addr = port ? 
NIG_REG_MASK_INTERRUPT_PORT1 : NIG_REG_MASK_INTERRUPT_PORT0; - u32 aeu_mask; + if (~bp->aeu_mask & (asserted & 0xff)) + BNX2X_ERR("IGU ERROR\n"); if (bp->attn_state & asserted) BNX2X_ERR("IGU ERROR\n"); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - aeu_mask = REG_RD(bp, aeu_addr); - DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", - aeu_mask, asserted); - aeu_mask &= ~(asserted & 0xff); - DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + bp->aeu_mask, asserted); + bp->aeu_mask &= ~(asserted & 0xff); + DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask); - REG_WR(bp, aeu_addr, aeu_mask); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + REG_WR(bp, aeu_addr, bp->aeu_mask); - DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); bp->attn_state |= asserted; - DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); if (asserted & ATTN_HARD_WIRED_MASK) { if (asserted & ATTN_NIG_FOR_FUNC) { @@ -2523,9 +2500,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) } /* if hardwired */ - DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", - asserted, hc_addr); - REG_WR(bp, hc_addr, asserted); + DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n", + asserted, BAR_IGU_INTMEM + igu_addr); + REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted); /* now set back the mask */ if (asserted & ATTN_NIG_FOR_FUNC) @@ -2553,12 +2530,12 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: /* Fan failure attention */ - /* The PHY reset is controlled by GPIO 1 */ + /* The PHY reset is controled by GPIO 1 */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - /* Low power mode is controlled by GPIO 2 */ + MISC_REGISTERS_GPIO_OUTPUT_LOW); + /* Low power mode is controled by GPIO 2 */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW); /* mark the failure */ bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; @@ -2722,11 +2699,10 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) int index; u32 reg_addr; u32 val; - u32 aeu_mask; /* need to take HW lock because MCP or other port might also try to handle this event */ - bnx2x_acquire_alr(bp); + bnx2x_lock_alr(bp); attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); @@ -2758,35 +2734,32 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) HW_PRTY_ASSERT_SET_1) || (attn.sig[2] & group_mask.sig[2] & HW_PRTY_ASSERT_SET_2)) - BNX2X_ERR("FATAL HW block parity attention\n"); + BNX2X_ERR("FATAL HW block parity attention\n"); } } - bnx2x_release_alr(bp); + bnx2x_unlock_alr(bp); - reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); + reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; val = ~deasserted; - DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", - val, reg_addr); - REG_WR(bp, reg_addr, val); +/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", + val, BAR_IGU_INTMEM + reg_addr); */ + REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); + if (bp->aeu_mask & (deasserted & 0xff)) + BNX2X_ERR("IGU BUG!\n"); if (~bp->attn_state & deasserted) - BNX2X_ERR("IGU ERROR\n"); + BNX2X_ERR("IGU BUG!\n"); reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - aeu_mask = REG_RD(bp, reg_addr); - - DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", - aeu_mask, deasserted); - aeu_mask |= (deasserted & 0xff); - DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask); + bp->aeu_mask |= (deasserted & 0xff); - REG_WR(bp, reg_addr, aeu_mask); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask); + REG_WR(bp, reg_addr, bp->aeu_mask); DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); bp->attn_state &= ~deasserted; @@ -2827,7 +2800,7 @@ static void bnx2x_sp_task(struct work_struct *work) /* Return here if interrupt is disabled */ if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); + DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); return; } @@ -2835,7 +2808,7 @@ static void bnx2x_sp_task(struct work_struct *work) /* if (status == 0) */ /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ - DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); + DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status); /* HW attentions */ if (status & 0x1) @@ -2865,7 +2838,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) /* Return here if interrupt is disabled */ if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); + DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); return IRQ_HANDLED; } @@ -2903,11 +2876,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) /* underflow */ \ d_hi = m_hi - s_hi; \ if (d_hi > 0) { \ - /* we can 'loan' 1 */ \ + /* we can 'loan' 1 */ \ d_hi--; \ d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ } else { \ - /* m_hi <= s_hi */ \ + /* m_hi <= s_hi */ \ d_hi = 0; \ d_lo = 0; \ } \ @@ -2917,7 +2890,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) d_hi = 0; \ d_lo = 0; \ } else { \ - /* m_hi >= s_hi */ \ + /* m_hi >= s_hi */ \ d_hi = m_hi - s_hi; \ d_lo = m_lo - s_lo; \ } \ @@ -2990,6 +2963,37 @@ static inline long bnx2x_hilo(u32 *hiref) * Init service functions */ +static void bnx2x_storm_stats_init(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1); + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0); + + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1); + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0); + + REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0); + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0); + + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), + U64_LO(bnx2x_sp_mapping(bp, fw_stats))); + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, + U64_HI(bnx2x_sp_mapping(bp, fw_stats))); + + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), + U64_LO(bnx2x_sp_mapping(bp, fw_stats))); + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, + U64_HI(bnx2x_sp_mapping(bp, fw_stats))); +} + static void bnx2x_storm_stats_post(struct bnx2x *bp) { if (!bp->stats_pending) { @@ -3028,8 +3032,6 @@ static void bnx2x_stats_init(struct bnx2x *bp) memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); 
bp->port.old_nig_stats.brb_discard = REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); - bp->port.old_nig_stats.brb_truncate = - REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, @@ -3099,12 +3101,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp) might_sleep(); while (*stats_comp != DMAE_COMP_VAL) { + msleep(1); if (!cnt) { BNX2X_ERR("timeout waiting for stats finished\n"); break; } cnt--; - msleep(1); } return 1; } @@ -3449,7 +3451,8 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); - UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); @@ -3533,8 +3536,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, new->brb_discard - old->brb_discard); - ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, - new->brb_truncate - old->brb_truncate); UPDATE_STAT64_NIG(egress_mac_pkt0, etherstatspkts1024octetsto1522octets); @@ -3712,7 +3713,8 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) nstats->rx_length_errors = estats->rx_stat_etherstatsundersizepkts_lo + estats->jabber_packets_received; - nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo; + nstats->rx_over_errors = estats->brb_drop_lo + + estats->brb_truncate_discard; nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo; nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo; nstats->rx_fifo_errors = old_tclient->no_buff_discard; @@ -3781,7 +3783,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) bp->fp->rx_comp_cons), le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n", - netif_queue_stopped(bp->dev) ? "Xoff" : "Xon", + netif_queue_stopped(bp->dev)? "Xoff" : "Xon", estats->driver_xoff, estats->brb_drop_lo); printk(KERN_DEBUG "tstats: checksum_discard %u " "packets_too_big_discard %u no_buff_discard %u " @@ -3992,14 +3994,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, - sizeof(struct ustorm_status_block)/4); + sizeof(struct ustorm_def_status_block)/4); bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, - sizeof(struct cstorm_status_block)/4); + sizeof(struct cstorm_def_status_block)/4); } -static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, - dma_addr_t mapping, int sb_id) +static void bnx2x_init_sb(struct bnx2x *bp, int sb_id, + struct host_status_block *sb, dma_addr_t mapping) { int port = BP_PORT(bp); int func = BP_FUNC(bp); @@ -4075,6 +4077,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, atten_status_block); def_sb->atten_status_block.status_block_id = sb_id; + bp->def_att_idx = 0; bp->attn_state = 0; reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : @@ -4091,6 +4094,9 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, reg_offset + 0xc + 0x10*index); } + bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0)); + reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L); @@ -4108,13 +4114,17 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, u_def_status_block); def_sb->u_def_status_block.status_block_id = sb_id; + bp->def_u_idx = 0; + REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); REG_WR(bp, BAR_USTRORM_INTMEM + ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section)); - REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + + REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); + REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func), + BNX2X_BTR); for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) REG_WR16(bp, BAR_USTRORM_INTMEM + @@ -4125,13 +4135,17 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, c_def_status_block); def_sb->c_def_status_block.status_block_id = sb_id; + bp->def_c_idx = 0; + REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); REG_WR(bp, BAR_CSTRORM_INTMEM + ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section)); - REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + + REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); + REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func), + BNX2X_BTR); for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) REG_WR16(bp, BAR_CSTRORM_INTMEM + @@ -4142,13 +4156,17 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, t_def_status_block); def_sb->t_def_status_block.status_block_id = sb_id; + bp->def_t_idx = 0; + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); REG_WR(bp, BAR_TSTRORM_INTMEM + ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section)); - REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + + REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func), + BNX2X_BTR); for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) REG_WR16(bp, BAR_TSTRORM_INTMEM + @@ -4159,20 +4177,23 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, x_def_status_block); def_sb->x_def_status_block.status_block_id = sb_id; + bp->def_x_idx = 0; + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); REG_WR(bp, BAR_XSTRORM_INTMEM + ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section)); - REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + + REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func), + BNX2X_BTR); for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); bp->stats_pending = 0; - bp->set_mac_pending = 0; bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); } @@ -4188,25 +4209,21 @@ static void bnx2x_update_coalesce(struct bnx2x *bp) /* HC_INDEX_U_ETH_RX_CQ_CONS */ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, - U_SB_ETH_RX_CQ_INDEX), + HC_INDEX_U_ETH_RX_CQ_CONS), bp->rx_ticks/12); REG_WR16(bp, BAR_USTRORM_INTMEM + 
USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, - U_SB_ETH_RX_CQ_INDEX), - bp->rx_ticks ? 0 : 1); - REG_WR16(bp, BAR_USTRORM_INTMEM + - USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, - U_SB_ETH_RX_BD_INDEX), + HC_INDEX_U_ETH_RX_CQ_CONS), bp->rx_ticks ? 0 : 1); /* HC_INDEX_C_ETH_TX_CQ_CONS */ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, - C_SB_ETH_TX_CQ_INDEX), + HC_INDEX_C_ETH_TX_CQ_CONS), bp->tx_ticks/12); REG_WR16(bp, BAR_CSTRORM_INTMEM + CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, - C_SB_ETH_TX_CQ_INDEX), + HC_INDEX_C_ETH_TX_CQ_CONS), bp->tx_ticks ? 0 : 1); } } @@ -4239,9 +4256,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, static void bnx2x_init_rx_rings(struct bnx2x *bp) { int func = BP_FUNC(bp); - int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H; - u16 ring_prod, cqe_ring_prod; + u16 ring_prod, cqe_ring_prod = 0; int i, j; bp->rx_buf_use_size = bp->dev->mtu; @@ -4255,9 +4270,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) bp->dev->mtu + ETH_OVREHEAD); for_each_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; + for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { + struct bnx2x_fastpath *fp = &bp->fp[j]; - for (i = 0; i < max_agg_queues; i++) { fp->tpa_pool[i].skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); if (!fp->tpa_pool[i].skb) { @@ -4337,7 +4352,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) BNX2X_ERR("disabling TPA for queue[%d]\n", j); /* Cleanup already allocated elements */ bnx2x_free_rx_sge_range(bp, fp, ring_prod); - bnx2x_free_tpa_pool(bp, fp, max_agg_queues); + bnx2x_free_tpa_pool(bp, fp, + ETH_MAX_AGGREGATION_QUEUES_E1H); fp->disable_tpa = 1; ring_prod = 0; break; @@ -4347,13 +4363,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) fp->rx_sge_prod = ring_prod; /* Allocate BDs and initialize BD ring */ - fp->rx_comp_cons = 0; + fp->rx_comp_cons = fp->rx_alloc_failed = 0; cqe_ring_prod = ring_prod = 0; for (i = 0; i < bp->rx_ring_size; i++) { if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { BNX2X_ERR("was only able to allocate " "%d rx skbs\n", i); - bp->eth_stats.rx_skb_alloc_failed++; + fp->rx_alloc_failed++; break; } ring_prod = NEXT_RX_IDX(ring_prod); @@ -4481,7 +4497,7 @@ static void bnx2x_init_context(struct bnx2x *bp) } context->cstorm_st_context.sb_index_number = - C_SB_ETH_TX_CQ_INDEX; + HC_INDEX_C_ETH_TX_CQ_CONS; context->cstorm_st_context.status_block_id = sb_id; context->xstorm_ag_context.cdu_reserved = @@ -4519,7 +4535,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp) int i; tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; - tstorm_client.statistics_counter_id = BP_CL_ID(bp); + tstorm_client.statistics_counter_id = 0; tstorm_client.config_flags = TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; #ifdef BCM_VLAN @@ -4563,7 +4579,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) int func = BP_FUNC(bp); int i; - DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask); + DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); switch (mode) { case BNX2X_RX_MODE_NONE: /* no Rx */ @@ -4601,35 +4617,13 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) bnx2x_set_client_config(bp); } -static void bnx2x_init_internal_common(struct bnx2x *bp) -{ - int i; - - /* Zero this manually as its initialization is - currently missing in the initTool */ - for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_AGG_DATA_OFFSET + i * 4, 0); -} - -static void bnx2x_init_internal_port(struct bnx2x *bp) -{ - int port = 
BP_PORT(bp); - - REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR); - REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); - REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); -} - -static void bnx2x_init_internal_func(struct bnx2x *bp) +static void bnx2x_init_internal(struct bnx2x *bp) { struct tstorm_eth_function_common_config tstorm_config = {0}; struct stats_indication_flags stats_flags = {0}; int port = BP_PORT(bp); int func = BP_FUNC(bp); int i; - u16 max_agg_size; if (is_multi(bp)) { tstorm_config.config_flags = MULTI_FLAGS; @@ -4642,53 +4636,31 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), (*(u32 *)&tstorm_config)); +/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", + (*(u32 *)&tstorm_config)); */ + bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ bnx2x_set_storm_rx_mode(bp); - /* reset xstorm per client statistics */ - for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) { - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) + - i*4, 0); - } - /* reset tstorm per client statistics */ - for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) { - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) + - i*4, 0); - } - - /* Init statistics related context */ stats_flags.collect_eth = 1; - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), ((u32 *)&stats_flags)[0]); - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4, + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, ((u32 *)&stats_flags)[1]); - REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), ((u32 *)&stats_flags)[0]); - REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4, + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, ((u32 *)&stats_flags)[1]); - REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), + REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), ((u32 *)&stats_flags)[0]); - REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4, + REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, ((u32 *)&stats_flags)[1]); - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), - U64_LO(bnx2x_sp_mapping(bp, fw_stats))); - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, - U64_HI(bnx2x_sp_mapping(bp, fw_stats))); - - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), - U64_LO(bnx2x_sp_mapping(bp, fw_stats))); - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, - U64_HI(bnx2x_sp_mapping(bp, fw_stats))); +/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", + ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ if (CHIP_IS_E1H(bp)) { REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, @@ -4704,12 +4676,15 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) bp->e1hov); } - /* Init CQ ring mapping and aggregation size */ - max_agg_size = min((u32)(bp->rx_buf_use_size + - 8*BCM_PAGE_SIZE*PAGES_PER_SGE), - (u32)0xffff); + /* Zero this manualy as its initialization is + currently missing in the initTool */ + for (i = 0; i < 
USTORM_AGG_DATA_SIZE >> 2; i++) + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_AGG_DATA_OFFSET + 4*i, 0); + for_each_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + u16 max_agg_size; REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), @@ -4718,34 +4693,16 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, U64_HI(fp->rx_comp_mapping)); + max_agg_size = min((u32)(bp->rx_buf_use_size + + 8*BCM_PAGE_SIZE*PAGES_PER_SGE), + (u32)0xffff); REG_WR16(bp, BAR_USTRORM_INTMEM + USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), max_agg_size); } } -static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) -{ - switch (load_code) { - case FW_MSG_CODE_DRV_LOAD_COMMON: - bnx2x_init_internal_common(bp); - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_PORT: - bnx2x_init_internal_port(bp); - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_FUNCTION: - bnx2x_init_internal_func(bp); - break; - - default: - BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); - break; - } -} - -static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) +static void bnx2x_nic_init(struct bnx2x *bp) { int i; @@ -4760,20 +4717,19 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); - bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, - FP_SB_ID(fp)); - bnx2x_update_fpsb_idx(fp); + bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, + fp->status_blk_mapping); } - bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping, - DEF_SB_ID); - bnx2x_update_dsb_idx(bp); + bnx2x_init_def_sb(bp, bp->def_status_blk, + bp->def_status_blk_mapping, DEF_SB_ID); bnx2x_update_coalesce(bp); bnx2x_init_rx_rings(bp); bnx2x_init_tx_ring(bp); bnx2x_init_sp_ring(bp); bnx2x_init_context(bp); - bnx2x_init_internal(bp, load_code); + bnx2x_init_internal(bp); + bnx2x_storm_stats_init(bp); bnx2x_init_ind_table(bp); bnx2x_int_enable(bp); } @@ -4922,7 +4878,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); REG_WR(bp, CFC_REG_DEBUG0, 0x1); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); /* Write 0 to parser credits for CFC search request */ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); @@ -4977,7 +4933,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); REG_WR(bp, CFC_REG_DEBUG0, 0x1); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); /* Write 0 to parser credits for CFC search request */ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); @@ -5044,7 +5000,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); REG_WR(bp, CFC_REG_DEBUG0, 0x0); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); + NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1); DP(NETIF_MSG_HW, "done\n"); @@ -5133,6 +5089,11 @@ static int bnx2x_init_common(struct bnx2x *bp) REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); #endif +#ifndef BCM_ISCSI + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); +#endif + REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); #ifdef BCM_ISCSI REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); @@ -5202,8 +5163,6 @@ static int bnx2x_init_common(struct bnx2x *bp) } bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); if 
(CHIP_IS_E1H(bp)) REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); @@ -5374,13 +5333,6 @@ static int bnx2x_init_common(struct bnx2x *bp) ((u32 *)&tmp)[1]); } - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_common_init_phy(bp, bp->common.shmem_base); - bnx2x_release_phy_lock(bp); - } else - BNX2X_ERR("Bootcode is missing - can not initialize link\n"); - return 0; } @@ -5686,23 +5638,18 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) int func = BP_FUNC(bp); u32 seq = ++bp->fw_seq; u32 rc = 0; - u32 cnt = 1; - u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); - do { - /* let the FW do it's magic ... */ - msleep(delay); - - rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); + /* let the FW do it's magic ... */ + msleep(100); /* TBD */ - /* Give the FW up to 2 second (200*10ms) */ - } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200)); + if (CHIP_REV_IS_SLOW(bp)) + msleep(900); - DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", - cnt*delay, rc, seq); + rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); + DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); /* is this a reply to our command? */ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { @@ -5766,7 +5713,6 @@ static void bnx2x_free_mem(struct bnx2x *bp) NUM_RCQ_BD); /* SGE ring */ - BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring)); BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), bnx2x_fp(bp, i, rx_sge_mapping), BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); @@ -5944,8 +5890,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) dev_kfree_skb(skb); } if (!fp->disable_tpa) - bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? - ETH_MAX_AGGREGATION_QUEUES_E1 : + bnx2x_free_tpa_pool(bp, fp, ETH_MAX_AGGREGATION_QUEUES_E1H); } } @@ -6031,8 +5976,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) bnx2x_msix_fp_int, 0, bp->dev->name, &bp->fp[i]); if (rc) { - BNX2X_ERR("request fp #%d irq failed rc -%d\n", - i + offset, -rc); + BNX2X_ERR("request fp #%d irq failed rc %d\n", + i + offset, rc); bnx2x_free_msix_irqs(bp); return -EBUSY; } @@ -6059,7 +6004,7 @@ static int bnx2x_req_irq(struct bnx2x *bp) * Init service functions */ -static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) +static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) { struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); int port = BP_PORT(bp); @@ -6081,15 +6026,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) config->config_table[0].cam_entry.lsb_mac_addr = swab16(*(u16 *)&bp->dev->dev_addr[4]); config->config_table[0].cam_entry.flags = cpu_to_le16(port); - if (set) - config->config_table[0].target_table_entry.flags = 0; - else - CAM_INVALIDATE(config->config_table[0]); + config->config_table[0].target_table_entry.flags = 0; config->config_table[0].target_table_entry.client_id = 0; config->config_table[0].target_table_entry.vlan_id = 0; - DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", - (set ? 
"setting" : "clearing"), + DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", config->config_table[0].cam_entry.msb_mac_addr, config->config_table[0].cam_entry.middle_mac_addr, config->config_table[0].cam_entry.lsb_mac_addr); @@ -6099,11 +6040,8 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) config->config_table[1].cam_entry.middle_mac_addr = 0xffff; config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; config->config_table[1].cam_entry.flags = cpu_to_le16(port); - if (set) - config->config_table[1].target_table_entry.flags = + config->config_table[1].target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; - else - CAM_INVALIDATE(config->config_table[1]); config->config_table[1].target_table_entry.client_id = 0; config->config_table[1].target_table_entry.vlan_id = 0; @@ -6112,12 +6050,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); } -static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) +static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) { struct mac_configuration_cmd_e1h *config = (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); - if (set && (bp->state != BNX2X_STATE_OPEN)) { + if (bp->state != BNX2X_STATE_OPEN) { DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); return; } @@ -6141,14 +6079,9 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) config->config_table[0].client_id = BP_L_ID(bp); config->config_table[0].vlan_id = 0; config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); - if (set) - config->config_table[0].flags = BP_PORT(bp); - else - config->config_table[0].flags = - MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; + config->config_table[0].flags = BP_PORT(bp); - DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", - (set ? 
"setting" : "clearing"), + DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", config->config_table[0].msb_mac_addr, config->config_table[0].middle_mac_addr, config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); @@ -6173,13 +6106,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, bnx2x_rx_int(bp->fp, 10); /* if index is different from 0 * the reply for some commands will - * be on the non default queue + * be on the none default queue */ if (idx) bnx2x_rx_int(&bp->fp[idx], 10); } - mb(); /* state is changed by bnx2x_sp_event() */ + if (*state_p == state) return 0; @@ -6234,6 +6167,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) { u32 load_code; int i, rc; + #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) return -EPERM; @@ -6249,24 +6183,22 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (!BP_NOMCP(bp)) { load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); + BNX2X_ERR("MCP response failure, unloading\n"); return -EBUSY; } if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) return -EBUSY; /* other port in diagnostic mode */ } else { - int port = BP_PORT(bp); - DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", load_count[0], load_count[1], load_count[2]); load_count[0]++; - load_count[1 + port]++; + load_count[1 + BP_PORT(bp)]++; DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", load_count[0], load_count[1], load_count[2]); if (load_count[0] == 1) load_code = FW_MSG_CODE_DRV_LOAD_COMMON; - else if (load_count[1 + port] == 1) + else if (load_count[1 + BP_PORT(bp)] == 1) load_code = FW_MSG_CODE_DRV_LOAD_PORT; else load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; @@ -6315,6 +6247,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_fp(bp, i, disable_tpa) = ((bp->flags & TPA_ENABLE_FLAG) == 0); + /* Disable interrupt handling until HW is initialized */ + atomic_set(&bp->intr_sem, 1); + if (bp->flags & USING_MSIX_FLAG) { rc = bnx2x_req_msix_irqs(bp); if (rc) { @@ -6341,14 +6276,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) goto load_error; } + /* Enable interrupt handling */ + atomic_set(&bp->intr_sem, 0); + /* Setup NIC internals and enable interrupts */ - bnx2x_nic_init(bp, load_code); + bnx2x_nic_init(bp); /* Send LOAD_DONE command to MCP */ if (!BP_NOMCP(bp)) { load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); + BNX2X_ERR("MCP response failure, unloading\n"); rc = -EBUSY; goto load_int_disable; } @@ -6363,12 +6301,11 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) for_each_queue(bp, i) napi_enable(&bnx2x_fp(bp, i, napi)); - /* Enable interrupt handling */ - atomic_set(&bp->intr_sem, 0); - rc = bnx2x_setup_leading(bp); if (rc) { - BNX2X_ERR("Setup leading failed!\n"); +#ifdef BNX2X_STOP_ON_ERROR + bp->panic = 1; +#endif goto load_stop_netif; } @@ -6386,9 +6323,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } if (CHIP_IS_E1(bp)) - bnx2x_set_mac_addr_e1(bp, 1); + bnx2x_set_mac_addr_e1(bp); else - bnx2x_set_mac_addr_e1h(bp, 1); + bnx2x_set_mac_addr_e1h(bp); if (bp->port.pmf) bnx2x_initial_phy_init(bp); @@ -6402,6 +6339,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) break; case LOAD_OPEN: + /* IRQ is only requested from bnx2x_open */ netif_start_queue(bp->dev); bnx2x_set_rx_mode(bp->dev); if (bp->flags & USING_MSIX_FLAG) @@ -6440,7 +6378,8 @@ static int bnx2x_nic_load(struct 
bnx2x *bp, int load_mode) /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); for_each_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + bnx2x_free_rx_sge_range(bp, bp->fp + i, + RX_SGE_CNT*NUM_RX_SGE_PAGES); load_error: bnx2x_free_mem(bp); @@ -6472,7 +6411,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index) return rc; } -static int bnx2x_stop_leading(struct bnx2x *bp) +static void bnx2x_stop_leading(struct bnx2x *bp) { u16 dsb_sp_prod_idx; /* if the other port is handling traffic, @@ -6490,7 +6429,7 @@ static int bnx2x_stop_leading(struct bnx2x *bp) rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, &(bp->fp[0].state), 1); if (rc) /* timeout */ - return rc; + return; dsb_sp_prod_idx = *bp->dsb_sp_prod; @@ -6502,24 +6441,20 @@ static int bnx2x_stop_leading(struct bnx2x *bp) so there is not much to do if this times out */ while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { + msleep(1); if (!cnt) { DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", *bp->dsb_sp_prod, dsb_sp_prod_idx); #ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); -#else - rc = -EBUSY; #endif break; } cnt--; - msleep(1); } bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; bp->fp[0].state = BNX2X_FP_STATE_CLOSED; - - return rc; } static void bnx2x_reset_func(struct bnx2x *bp) @@ -6561,7 +6496,7 @@ static void bnx2x_reset_port(struct bnx2x *bp) val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); if (val) DP(NETIF_MSG_IFDOWN, - "BRB1 is not empty %d blocks are occupied\n", val); + "BRB1 is not empty %d blooks are occupied\n", val); /* TODO: Close Doorbell port? */ } @@ -6601,12 +6536,11 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) } } -/* must be called with rtnl_lock */ +/* msut be called with rtnl_lock */ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) { - int port = BP_PORT(bp); u32 reset_code = 0; - int i, cnt, rc; + int i, cnt; bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; @@ -6623,17 +6557,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); bnx2x_stats_handle(bp, STATS_EVENT_STOP); - /* Wait until tx fast path tasks complete */ + /* Wait until all fast path tasks complete */ for_each_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; +#ifdef BNX2X_STOP_ON_ERROR +#ifdef __powerpc64__ + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", +#else + DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n", +#endif + fp->tpa_queue_used); +#endif cnt = 1000; smp_rmb(); - while (BNX2X_HAS_TX_WORK(fp)) { - - if (!netif_running(bp->dev)) - bnx2x_tx_int(fp, 1000); - + while (bnx2x_has_work(fp)) { + msleep(1); if (!cnt) { BNX2X_ERR("timeout waiting for queue[%d]\n", i); @@ -6645,13 +6584,14 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) #endif } cnt--; - msleep(1); smp_rmb(); } } - /* Give HW time to discard old tx messages */ - msleep(1); + /* Wait until all slow path tasks complete */ + cnt = 1000; + while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--) + msleep(1); for_each_queue(bp, i) napi_disable(&bnx2x_fp(bp, i, napi)); @@ -6661,79 +6601,52 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Release IRQs */ bnx2x_free_irq(bp); - if (unload_mode == UNLOAD_NORMAL) - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - - else if (bp->flags & NO_WOL_FLAG) { + if (bp->flags & NO_WOL_FLAG) reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; - if (CHIP_IS_E1H(bp)) - REG_WR(bp, MISC_REG_E1HMF_MODE, 0); - } else if 
(bp->wol) { - u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + else if (bp->wol) { + u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; u8 *mac_addr = bp->dev->dev_addr; u32 val; + /* The mac address is written to entries 1-4 to preserve entry 0 which is used by the PMF */ - u8 entry = (BP_E1HVN(bp) + 1)*8; - val = (mac_addr[0] << 8) | mac_addr[1]; - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); + EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); + EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, + val); reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; } else reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - if (CHIP_IS_E1(bp)) { - struct mac_configuration_cmd *config = - bnx2x_sp(bp, mcast_config); - - bnx2x_set_mac_addr_e1(bp, 0); - - for (i = 0; i < config->hdr.length_6b; i++) - CAM_INVALIDATE(config->config_table[i]); - - config->hdr.length_6b = i; - if (CHIP_REV_IS_SLOW(bp)) - config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port); - else - config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port); - config->hdr.client_id = BP_CL_ID(bp); - config->hdr.reserved1 = 0; - - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, - U64_HI(bnx2x_sp_mapping(bp, mcast_config)), - U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); - - } else { /* E1H */ - bnx2x_set_mac_addr_e1h(bp, 0); - - for (i = 0; i < MC_HASH_SIZE; i++) - REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); - } - - if (CHIP_IS_E1H(bp)) - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); - /* Close multi and leading connections Completions for ramrods are collected in a synchronous way */ for_each_nondefault_queue(bp, i) if (bnx2x_stop_multi(bp, i)) goto unload_error; - rc = bnx2x_stop_leading(bp); - if (rc) { - BNX2X_ERR("Stop leading failed!\n"); + if (CHIP_IS_E1H(bp)) + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); + + bnx2x_stop_leading(bp); #ifdef BNX2X_STOP_ON_ERROR + /* If ramrod completion timed out - break here! */ + if (bp->panic) { + BNX2X_ERR("Stop leading failed!\n"); return -EBUSY; -#else - goto unload_error; + } #endif + + if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) || + (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) { + DP(NETIF_MSG_IFDOWN, "failed to close leading properly! 
" + "state 0x%x fp[0].state 0x%x\n", + bp->state, bp->fp[0].state); } unload_error: @@ -6743,12 +6656,12 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", load_count[0], load_count[1], load_count[2]); load_count[0]--; - load_count[1 + port]--; + load_count[1 + BP_PORT(bp)]--; DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", load_count[0], load_count[1], load_count[2]); if (load_count[0] == 0) reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; - else if (load_count[1 + port] == 0) + else if (load_count[1 + BP_PORT(bp)] == 0) reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; else reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; @@ -6768,7 +6681,8 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); for_each_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + bnx2x_free_rx_sge_range(bp, bp->fp + i, + RX_SGE_CNT*NUM_RX_SGE_PAGES); bnx2x_free_mem(bp); bp->state = BNX2X_STATE_CLOSED; @@ -6819,93 +6733,56 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) /* Check if it is the UNDI driver * UNDI driver initializes CID offset for normal bell to 0x7 */ - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); if (val == 0x7) { u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - /* save our func */ + /* save our func and fw_seq */ int func = BP_FUNC(bp); - u32 swap_en; - u32 swap_val; + u16 fw_seq = bp->fw_seq; BNX2X_DEV_INFO("UNDI is active! reset device\n"); /* try unload UNDI on port 0 */ bp->func = 0; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); + bp->fw_seq = (SHMEM_RD(bp, + func_mb[bp->func].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + reset_code = bnx2x_fw_command(bp, reset_code); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); /* if UNDI is loaded on the other port */ if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { - /* send "DONE" for previous unload */ - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - - /* unload UNDI on port 1 */ bp->func = 1; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - - bnx2x_fw_command(bp, reset_code); + bp->fw_seq = (SHMEM_RD(bp, + func_mb[bp->func].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + + bnx2x_fw_command(bp, + DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); + bnx2x_fw_command(bp, + DRV_MSG_CODE_UNLOAD_DONE); + + /* restore our func and fw_seq */ + bp->func = func; + bp->fw_seq = fw_seq; } - REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : - HC_REG_CONFIG_0), 0x1000); - - /* close input traffic and wait for it */ - /* Do not rcv packets to BRB */ - REG_WR(bp, - (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : - NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); - /* Do not direct rcv packets that are not for MCP to - * the BRB */ - REG_WR(bp, - (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP : - NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); - /* clear AEU */ - REG_WR(bp, - (BP_PORT(bp) ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); - msleep(10); - - /* save NIG port swap info */ - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); /* reset device */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0xd3ffffff); + 0xd3ffff7f); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); - /* take the NIG out of reset and restore swap values */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, - MISC_REGISTERS_RESET_REG_1_RST_NIG); - REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); - REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); - - /* send unload done to the MCP */ - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - - /* restore our func and fw_seq */ - bp->func = func; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); } - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); } } static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) { u32 val, val2, val3, val4, id; - u16 pmc; /* Get the chip revision id and number. */ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ @@ -6963,16 +6840,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) BNX2X_ERR("This driver needs bc_ver %X but found %X," " please upgrade BC\n", BNX2X_BC_VER, val); } - - if (BP_E1HVN(bp) == 0) { - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); - bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; - } else { - /* no WOL capability for E1HVN != 0 */ - bp->flags |= NO_WOL_FLAG; - } - BNX2X_DEV_INFO("%sWoL capable\n", - (bp->flags & NO_WOL_FLAG) ? "Not " : ""); + BNX2X_DEV_INFO("%sWoL Capable\n", + (bp->flags & NO_WOL_FLAG)? "Not " : ""); val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); @@ -7405,8 +7274,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); - val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & - FUNC_MF_CFG_E1HOV_TAG_MASK); + val = + (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK); if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { bp->e1hov = val; @@ -7454,7 +7324,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) if (BP_NOMCP(bp)) { /* only supposed to happen on emulation/FPGA */ - BNX2X_ERR("warning random MAC workaround active\n"); + BNX2X_ERR("warning rendom MAC workaround active\n"); random_ether_addr(bp->dev->dev_addr); memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); } @@ -7467,8 +7337,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) int func = BP_FUNC(bp); int rc; - /* Disable interrupt handling until HW is initialized */ - atomic_set(&bp->intr_sem, 1); + if (nomcp) + bp->flags |= NO_MCP_FLAG; mutex_init(&bp->port.phy_mutex); @@ -7507,6 +7377,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) bp->tx_ticks = 50; bp->rx_ticks = 25; + bp->stats_ticks = 1000000 & 0xffff00; + bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); bp->current_interval = (poll ? 
poll : bp->timer_interval); @@ -7756,25 +7628,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnx2x *bp = netdev_priv(dev); - u8 phy_fw_ver[PHY_FW_VER_LEN]; + char phy_fw_ver[PHY_FW_VER_LEN]; strcpy(info->driver, DRV_MODULE_NAME); strcpy(info->version, DRV_MODULE_VERSION); phy_fw_ver[0] = '\0'; if (bp->port.pmf) { - bnx2x_acquire_phy_lock(bp); + bnx2x_phy_hw_lock(bp); bnx2x_get_ext_phy_fw_version(&bp->link_params, (bp->state != BNX2X_STATE_CLOSED), phy_fw_ver, PHY_FW_VER_LEN); - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_unlock(bp); } - snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s", - (bp->common.bc_ver & 0xff0000) >> 16, - (bp->common.bc_ver & 0xff00) >> 8, - (bp->common.bc_ver & 0xff), - ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver); + snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", + BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, + BCM_5710_FW_REVISION_VERSION, + BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, + ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); strcpy(info->bus_info, pci_name(bp->pdev)); info->n_stats = BNX2X_NUM_STATS; info->testinfo_len = BNX2X_NUM_TESTS; @@ -8225,7 +8097,7 @@ static int bnx2x_set_eeprom(struct net_device *dev, if (eeprom->magic == 0x00504859) if (bp->port.pmf) { - bnx2x_acquire_phy_lock(bp); + bnx2x_phy_hw_lock(bp); rc = bnx2x_flash_download(bp, BP_PORT(bp), bp->link_params.ext_phy_config, (bp->state != BNX2X_STATE_CLOSED), @@ -8237,7 +8109,7 @@ static int bnx2x_set_eeprom(struct net_device *dev, rc |= bnx2x_phy_init(&bp->link_params, &bp->link_vars); } - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_unlock(bp); } else /* Only the PMF can access the PHY */ return -EINVAL; @@ -8256,6 +8128,7 @@ static int bnx2x_get_coalesce(struct net_device *dev, coal->rx_coalesce_usecs = bp->rx_ticks; coal->tx_coalesce_usecs = bp->tx_ticks; + coal->stats_block_coalesce_usecs = bp->stats_ticks; return 0; } @@ -8273,12 +8146,44 @@ static int bnx2x_set_coalesce(struct net_device *dev, if (bp->tx_ticks > 0x3000) bp->tx_ticks = 0x3000; + bp->stats_ticks = coal->stats_block_coalesce_usecs; + if (bp->stats_ticks > 0xffff00) + bp->stats_ticks = 0xffff00; + bp->stats_ticks &= 0xffff00; + if (netif_running(dev)) bnx2x_update_coalesce(bp); return 0; } +static int bnx2x_set_flags(struct net_device *dev, u32 data) +{ + struct bnx2x *bp = netdev_priv(dev); + int changed = 0; + int rc = 0; + + if (data & ETH_FLAG_LRO) { + if (!(dev->features & NETIF_F_LRO)) { + dev->features |= NETIF_F_LRO; + bp->flags |= TPA_ENABLE_FLAG; + changed = 1; + } + + } else if (dev->features & NETIF_F_LRO) { + dev->features &= ~NETIF_F_LRO; + bp->flags &= ~TPA_ENABLE_FLAG; + changed = 1; + } + + if (changed && netif_running(dev)) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + } + + return rc; +} + static void bnx2x_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { @@ -8361,7 +8266,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, if (epause->autoneg) { if (!(bp->port.supported & SUPPORTED_Autoneg)) { - DP(NETIF_MSG_LINK, "autoneg not supported\n"); + DP(NETIF_MSG_LINK, "Autoneg not supported\n"); return -EINVAL; } @@ -8380,34 +8285,6 @@ static int bnx2x_set_pauseparam(struct net_device *dev, return 0; } -static int bnx2x_set_flags(struct net_device *dev, u32 data) -{ - struct bnx2x *bp = netdev_priv(dev); - int changed = 0; - int rc = 0; - - /* TPA requires Rx CSUM offloading */ - if ((data & ETH_FLAG_LRO) && bp->rx_csum) { - if (!(dev->features & NETIF_F_LRO)) 
{ - dev->features |= NETIF_F_LRO; - bp->flags |= TPA_ENABLE_FLAG; - changed = 1; - } - - } else if (dev->features & NETIF_F_LRO) { - dev->features &= ~NETIF_F_LRO; - bp->flags &= ~TPA_ENABLE_FLAG; - changed = 1; - } - - if (changed && netif_running(dev)) { - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); - } - - return rc; -} - static u32 bnx2x_get_rx_csum(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -8418,19 +8295,9 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev) static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) { struct bnx2x *bp = netdev_priv(dev); - int rc = 0; bp->rx_csum = data; - - /* Disable TPA, when Rx CSUM is disabled. Otherwise all - TPA'ed packets will be discarded due to wrong TCP CSUM */ - if (!data) { - u32 flags = ethtool_op_get_flags(dev); - - rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO)); - } - - return rc; + return 0; } static int bnx2x_set_tso(struct net_device *dev, u32 data) @@ -8468,7 +8335,6 @@ static int bnx2x_test_registers(struct bnx2x *bp) { int idx, i, rc = -ENODEV; u32 wr_val = 0; - int port = BP_PORT(bp); static const struct { u32 offset0; u32 offset1; @@ -8534,6 +8400,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { u32 offset, mask, save_val, val; + int port = BP_PORT(bp); offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; mask = reg_tbl[i].mask; @@ -8579,17 +8446,16 @@ static int bnx2x_test_memory(struct bnx2x *bp) static const struct { char *name; u32 offset; - u32 e1_mask; - u32 e1h_mask; + u32 mask; } prty_tbl[] = { - { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, - { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, - { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, - { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, - { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, - { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, - - { NULL, 0xffffffff, 0, 0 } + { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 }, + { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 }, + { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 }, + { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 }, + { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 }, + { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 }, + + { NULL, 0xffffffff, 0 } }; if (!netif_running(bp->dev)) @@ -8603,8 +8469,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) /* Check the parity status */ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { val = REG_RD(bp, prty_tbl[i].offset); - if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || - (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { + if (val & ~(prty_tbl[i].mask)) { DP(NETIF_MSG_HW, "%s is 0x%x\n", prty_tbl[i].name, val); goto test_mem_exit; @@ -8674,15 +8539,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) if (loopback_mode == BNX2X_MAC_LOOPBACK) { bp->link_params.loopback_mode = LOOPBACK_BMAC; - bnx2x_acquire_phy_lock(bp); + bnx2x_phy_hw_lock(bp); bnx2x_phy_init(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_unlock(bp); } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { bp->link_params.loopback_mode = LOOPBACK_XGXS_10; - bnx2x_acquire_phy_lock(bp); + bnx2x_phy_hw_lock(bp); bnx2x_phy_init(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); + bnx2x_phy_hw_unlock(bp); /* wait until link state is restored */ bnx2x_wait_for_link(bp, link_up); @@ -8906,7 +8771,7 @@ static void bnx2x_self_test(struct net_device 
*dev, if (!netif_running(dev)) return; - /* offline tests are not supported in MF mode */ + /* offline tests are not suppoerted in MF mode */ if (IS_E1HMF(bp)) etest->flags &= ~ETH_TEST_FL_OFFLINE; @@ -8962,99 +8827,76 @@ static const struct { long offset; int size; u32 flags; -#define STATS_FLAGS_PORT 1 -#define STATS_FLAGS_FUNC 2 - u8 string[ETH_GSTRING_LEN]; + char string[ETH_GSTRING_LEN]; } bnx2x_stats_arr[BNX2X_NUM_STATS] = { -/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), - 8, STATS_FLAGS_FUNC, "rx_bytes" }, - { STATS_OFFSET32(error_bytes_received_hi), - 8, STATS_FLAGS_FUNC, "rx_error_bytes" }, - { STATS_OFFSET32(total_bytes_transmitted_hi), - 8, STATS_FLAGS_FUNC, "tx_bytes" }, - { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), - 8, STATS_FLAGS_PORT, "tx_error_bytes" }, +/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" }, + { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" }, + { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" }, + { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_received_hi), - 8, STATS_FLAGS_FUNC, "rx_ucast_packets" }, + 8, 1, "rx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_received_hi), - 8, STATS_FLAGS_FUNC, "rx_mcast_packets" }, + 8, 1, "rx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, STATS_FLAGS_FUNC, "rx_bcast_packets" }, + 8, 1, "rx_bcast_packets" }, { STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, STATS_FLAGS_FUNC, "tx_packets" }, + 8, 1, "tx_packets" }, { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), - 8, STATS_FLAGS_PORT, "tx_mac_errors" }, + 8, 0, "tx_mac_errors" }, /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), - 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, + 8, 0, "tx_carrier_errors" }, { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), - 8, STATS_FLAGS_PORT, "rx_crc_errors" }, + 8, 0, "rx_crc_errors" }, { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), - 8, STATS_FLAGS_PORT, "rx_align_errors" }, + 8, 0, "rx_align_errors" }, { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_single_collisions" }, + 8, 0, "tx_single_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, + 8, 0, "tx_multi_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), - 8, STATS_FLAGS_PORT, "tx_deferred" }, + 8, 0, "tx_deferred" }, { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, + 8, 0, "tx_excess_collisions" }, { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_late_collisions" }, + 8, 0, "tx_late_collisions" }, { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), - 8, STATS_FLAGS_PORT, "tx_total_collisions" }, + 8, 0, "tx_total_collisions" }, { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), - 8, STATS_FLAGS_PORT, "rx_fragments" }, -/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), - 8, STATS_FLAGS_PORT, "rx_jabbers" }, + 8, 0, "rx_fragments" }, +/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" }, { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, + 8, 0, "rx_undersize_packets" }, { STATS_OFFSET32(jabber_packets_received), - 4, STATS_FLAGS_FUNC, "rx_oversize_packets" }, + 4, 1, "rx_oversize_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), - 8, STATS_FLAGS_PORT, 
"tx_64_byte_packets" }, + 8, 0, "tx_64_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), - 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, + 8, 0, "tx_65_to_127_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), - 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, + 8, 0, "tx_128_to_255_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), - 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, + 8, 0, "tx_256_to_511_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), - 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, + 8, 0, "tx_512_to_1023_byte_packets" }, { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, + 8, 0, "tx_1024_to_1522_byte_packets" }, { STATS_OFFSET32(etherstatspktsover1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, + 8, 0, "tx_1523_to_9022_byte_packets" }, /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), - 8, STATS_FLAGS_PORT, "rx_xon_frames" }, + 8, 0, "rx_xon_frames" }, { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), - 8, STATS_FLAGS_PORT, "rx_xoff_frames" }, - { STATS_OFFSET32(tx_stat_outxonsent_hi), - 8, STATS_FLAGS_PORT, "tx_xon_frames" }, - { STATS_OFFSET32(tx_stat_outxoffsent_hi), - 8, STATS_FLAGS_PORT, "tx_xoff_frames" }, + 8, 0, "rx_xoff_frames" }, + { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" }, + { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" }, { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), - 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, - { STATS_OFFSET32(mac_filter_discard), - 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, - { STATS_OFFSET32(no_buff_discard), - 4, STATS_FLAGS_FUNC, "rx_discards" }, - { STATS_OFFSET32(xxoverflow_discard), - 4, STATS_FLAGS_PORT, "rx_fw_discards" }, - { STATS_OFFSET32(brb_drop_hi), - 8, STATS_FLAGS_PORT, "brb_discard" }, - { STATS_OFFSET32(brb_truncate_hi), - 8, STATS_FLAGS_PORT, "brb_truncate" }, -/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt), - 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"}, - { STATS_OFFSET32(rx_skb_alloc_failed), - 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" }, -/* 42 */{ STATS_OFFSET32(hw_csum_err), - 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" } + 8, 0, "rx_mac_ctrl_frames" }, + { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" }, + { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" }, + { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" }, + { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" }, +/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" } }; -#define IS_NOT_E1HMF_STAT(bp, i) \ - (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT)) - static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); @@ -9063,7 +8905,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) switch (stringset) { case ETH_SS_STATS: for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_NOT_E1HMF_STAT(bp, i)) + if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) continue; strcpy(buf + j*ETH_GSTRING_LEN, bnx2x_stats_arr[i].string); @@ -9083,7 +8925,7 @@ static int bnx2x_get_stats_count(struct net_device *dev) int i, num_stats = 0; for (i = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_NOT_E1HMF_STAT(bp, i)) + if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) continue; num_stats++; } @@ -9098,7 +8940,7 @@ static void 
bnx2x_get_ethtool_stats(struct net_device *dev, int i, j; for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_NOT_E1HMF_STAT(bp, i)) + if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) continue; if (bnx2x_stats_arr[i].size == 0) { @@ -9215,7 +9057,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) PCI_PM_CTRL_PME_STATUS)); if (pmcsr & PCI_PM_CTRL_STATE_MASK) - /* delay required during transition out of D3hot */ + /* delay required during transition out of D3hot */ msleep(20); break; @@ -9262,16 +9104,17 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) bnx2x_update_fpsb_idx(fp); - if (BNX2X_HAS_TX_WORK(fp)) + if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || + (fp->tx_pkt_prod != fp->tx_pkt_cons)) bnx2x_tx_int(fp, budget); - if (BNX2X_HAS_RX_WORK(fp)) + if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) work_done = bnx2x_rx_int(fp, budget); - rmb(); /* BNX2X_HAS_WORK() reads the status block */ + rmb(); /* bnx2x_has_work() reads the status block */ /* must not complete if we consumed full budget */ - if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { + if ((work_done < budget) && !bnx2x_has_work(fp)) { #ifdef BNX2X_STOP_ON_ERROR poll_panic: @@ -9288,7 +9131,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) /* we split the first BD into headers and data BDs - * to ease the pain of our fellow microcode engineers + * to ease the pain of our fellow micocode engineers * we use one mapping for both BDs * So far this has only been observed to happen * in Other Operating Systems(TM) @@ -9395,7 +9238,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, /* Check if LSO packet needs to be copied: 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ int wnd_size = MAX_FETCH_BD - 3; - /* Number of windows to check */ + /* Number of widnows to check */ int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; int wnd_idx = 0; int frag_idx = 0; @@ -9497,7 +9340,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); - /* First, check if we need to linearize the skb + /* First, check if we need to linearaize the skb (due to FW restrictions) */ if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { /* Statistics of linearization */ @@ -9506,7 +9349,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " "silently dropping this SKB\n"); dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + return 0; } } @@ -9529,8 +9372,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_bd->general_data = (UNICAST_ADDRESS << ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); - /* header nbd */ - tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT); + tx_bd->general_data |= 1; /* header nbd */ /* remember the first BD of the packet */ tx_buf->first_bd = fp->tx_bd_prod; @@ -9609,7 +9451,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2); + nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 
1 : 2); tx_bd->nbd = cpu_to_le16(nbd); tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); @@ -9879,9 +9721,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); if (netif_running(dev)) { if (CHIP_IS_E1(bp)) - bnx2x_set_mac_addr_e1(bp, 1); + bnx2x_set_mac_addr_e1(bp); else - bnx2x_set_mac_addr_e1h(bp, 1); + bnx2x_set_mac_addr_e1h(bp); } return 0; @@ -9892,7 +9734,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); struct bnx2x *bp = netdev_priv(dev); - int port = BP_PORT(bp); int err; switch (cmd) { @@ -9908,7 +9749,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EAGAIN; mutex_lock(&bp->port.phy_mutex); - err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr, + err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, DEFAULT_PHY_DEV_ADDR, (data->reg_num & 0x1f), &mii_regval); data->val_out = mii_regval; @@ -9924,7 +9765,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EAGAIN; mutex_lock(&bp->port.phy_mutex); - err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr, + err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, DEFAULT_PHY_DEV_ADDR, (data->reg_num & 0x1f), data->val_in); mutex_unlock(&bp->port.phy_mutex); @@ -10300,7 +10141,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(dev); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_NORMAL); bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); @@ -10333,7 +10174,7 @@ static int bnx2x_resume(struct pci_dev *pdev) bnx2x_set_power_state(bp, PCI_D0); netif_device_attach(dev); - rc = bnx2x_nic_load(bp, LOAD_OPEN); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); rtnl_unlock(); diff --git a/trunk/drivers/net/bnx2x_reg.h b/trunk/drivers/net/bnx2x_reg.h index a67b0c358ae4..15c9a9946724 100644 --- a/trunk/drivers/net/bnx2x_reg.h +++ b/trunk/drivers/net/bnx2x_reg.h @@ -6,7 +6,7 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * The registers description starts with the register Access type followed + * The registers description starts with the regsister Access type followed * by size in bits. For example [RW 32]. The access types are: * R - Read only * RC - Clear on read @@ -49,7 +49,7 @@ /* [RW 10] Write client 0: Assert pause threshold. */ #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 #define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c -/* [R 24] The number of full blocks occupied by port. */ +/* [R 24] The number of full blocks occpied by port. */ #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 /* [RW 1] Reset the design by software. */ #define BRB1_REG_SOFT_RESET 0x600dc @@ -740,7 +740,6 @@ #define HC_REG_ATTN_MSG1_ADDR_L 0x108020 #define HC_REG_ATTN_NUM_P0 0x108038 #define HC_REG_ATTN_NUM_P1 0x10803c -#define HC_REG_COMMAND_REG 0x108180 #define HC_REG_CONFIG_0 0x108000 #define HC_REG_CONFIG_1 0x108004 #define HC_REG_FUNC_NUM_P0 0x1080ac @@ -1373,23 +1372,6 @@ be asserted). */ #define MISC_REG_DRIVER_CONTROL_16 0xa5f0 #define MISC_REG_DRIVER_CONTROL_16_SIZE 2 -/* [RW 32] The following driver registers(1...16) represent 16 drivers and - 32 clients. Each client can be controlled by one driver only. One in each - bit represent that this driver control the appropriate client (Ex: bit 5 - is set means this driver control client number 5). 
addr1 = set; addr0 = - clear; read from both addresses will give the same result = status. write - to address 1 will set a request to control all the clients that their - appropriate bit (in the write command) is set. if the client is free (the - appropriate bit in all the other drivers is clear) one will be written to - that driver register; if the client isn't free the bit will remain zero. - if the appropriate bit is set (the driver request to gain control on a - client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW - interrupt will be asserted). write to address 0 will set a request to - free all the clients that their appropriate bit (in the write command) is - set. if the appropriate bit is clear (the driver request to free a client - it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will - be asserted). */ -#define MISC_REG_DRIVER_CONTROL_7 0xa3c8 /* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 only. */ #define MISC_REG_E1HMF_MODE 0xa5f8 @@ -1412,13 +1394,13 @@ #define MISC_REG_GPIO 0xa490 /* [R 28] this field hold the last information that caused reserved attention. bits [19:0] - address; [22:20] function; [23] reserved; - [27:24] the master that caused the attention - according to the following + [27:24] the master thatcaused the attention - according to the following encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = dbu; 8 = dmae */ #define MISC_REG_GRC_RSV_ATTN 0xa3c0 /* [R 28] this field hold the last information that caused timeout attention. bits [19:0] - address; [22:20] function; [23] reserved; - [27:24] the master that caused the attention - according to the following + [27:24] the master thatcaused the attention - according to the following encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = dbu; 8 = dmae */ #define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 @@ -1695,7 +1677,6 @@ /* [RW 8] init credit counter for port0 in LLH */ #define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 #define NIG_REG_LLH0_XCM_MASK 0x10130 -#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248 /* [RW 1] send to BRB1 if no match on any of RMP rules. */ #define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc /* [RW 2] Determine the classification participants. 
0: no classification.1: @@ -1746,9 +1727,6 @@ /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure for port0 */ #define NIG_REG_STAT0_BRB_DISCARD 0x105f0 -/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure - for port0 */ -#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8 /* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that between 1024 and 1522 bytes for port0 */ #define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 @@ -2320,7 +2298,7 @@ /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; -128k */ #define PXP2_REG_RQ_QM_P_SIZE 0x120050 -/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */ +/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */ #define PXP2_REG_RQ_RBC_DONE 0x1201b0 /* [RW 3] Max burst size filed for read requests port 0; 000 - 128B; 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ @@ -2428,7 +2406,7 @@ /* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the buffer reaches this number has_payload will be asserted */ #define PXP2_REG_WR_DMAE_MPS 0x1205ec -/* [RW 10] if Number of entries in dmae fifo will be higher than this +/* [RW 10] if Number of entries in dmae fifo will be higer than this threshold then has_payload indication will be asserted; the default value should be equal to > write MBS size! */ #define PXP2_REG_WR_DMAE_TH 0x120368 @@ -2449,7 +2427,7 @@ /* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the buffer reaches this number has_payload will be asserted */ #define PXP2_REG_WR_TSDM_MPS 0x1205d4 -/* [RW 10] if Number of entries in usdmdp fifo will be higher than this +/* [RW 10] if Number of entries in usdmdp fifo will be higer than this threshold then has_payload indication will be asserted; the default value should be equal to > write MBS size! 
*/ #define PXP2_REG_WR_USDMDP_TH 0x120348 @@ -3316,12 +3294,12 @@ #define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0 #define CFC_DEBUG1_REG_WRITE_AC (0x1<<4) #define CFC_DEBUG1_REG_WRITE_AC_SIZE 4 -/* [R 1] debug only: This bit indicates whether indicates that external +/* [R 1] debug only: This bit indicates wheter indicates that external buffer was wrapped (oldest data was thrown); Relevant only when ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */ #define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124 #define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1 -/* [R 1] debug only: This bit indicates whether the internal buffer was +/* [R 1] debug only: This bit indicates wheter the internal buffer was wrapped (oldest data was thrown) Relevant only when ~dbg_registers_debug_target=0 (internal buffer) */ #define DBG_REG_WRAP_ON_INT_BUFFER 0xc128 @@ -4966,7 +4944,6 @@ #define EMAC_RX_MODE_PROMISCUOUS (1L<<8) #define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) #define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) -#define EMAC_TX_MODE_FLOW_EN (1L<<4) #define MISC_REGISTERS_GPIO_0 0 #define MISC_REGISTERS_GPIO_1 1 #define MISC_REGISTERS_GPIO_2 2 @@ -4982,7 +4959,6 @@ #define MISC_REGISTERS_GPIO_PORT_SHIFT 4 #define MISC_REGISTERS_GPIO_SET_POS 8 #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 -#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) #define MISC_REGISTERS_RESET_REG_1_SET 0x584 #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) @@ -5017,9 +4993,7 @@ #define HW_LOCK_MAX_RESOURCE_VALUE 31 #define HW_LOCK_RESOURCE_8072_MDIO 0 #define HW_LOCK_RESOURCE_GPIO 1 -#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 #define HW_LOCK_RESOURCE_SPIO 2 -#define HW_LOCK_RESOURCE_UNDI 5 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) @@ -5170,73 +5144,59 @@ #define GRCBASE_MISC_AEU GRCBASE_MISC -/* offset of configuration space in the pci core register */ +/*the offset of the configuration space in the pci core register*/ #define PCICFG_OFFSET 0x2000 #define PCICFG_VENDOR_ID_OFFSET 0x00 #define PCICFG_DEVICE_ID_OFFSET 0x02 #define PCICFG_COMMAND_OFFSET 0x04 -#define PCICFG_COMMAND_IO_SPACE (1<<0) -#define PCICFG_COMMAND_MEM_SPACE (1<<1) -#define PCICFG_COMMAND_BUS_MASTER (1<<2) -#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) -#define PCICFG_COMMAND_MWI_CYCLES (1<<4) -#define PCICFG_COMMAND_VGA_SNOOP (1<<5) -#define PCICFG_COMMAND_PERR_ENA (1<<6) -#define PCICFG_COMMAND_STEPPING (1<<7) -#define PCICFG_COMMAND_SERR_ENA (1<<8) -#define PCICFG_COMMAND_FAST_B2B (1<<9) -#define PCICFG_COMMAND_INT_DISABLE (1<<10) -#define PCICFG_COMMAND_RESERVED (0x1f<<11) #define PCICFG_STATUS_OFFSET 0x06 -#define PCICFG_REVESION_ID 0x08 +#define PCICFG_REVESION_ID 0x08 #define PCICFG_CACHE_LINE_SIZE 0x0c #define PCICFG_LATENCY_TIMER 0x0d -#define PCICFG_BAR_1_LOW 0x10 -#define PCICFG_BAR_1_HIGH 0x14 -#define PCICFG_BAR_2_LOW 0x18 -#define PCICFG_BAR_2_HIGH 0x1c -#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c +#define PCICFG_BAR_1_LOW 0x10 +#define PCICFG_BAR_1_HIGH 0x14 +#define PCICFG_BAR_2_LOW 0x18 +#define PCICFG_BAR_2_HIGH 0x1c +#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c #define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e -#define PCICFG_INT_LINE 0x3c -#define PCICFG_INT_PIN 0x3d -#define PCICFG_PM_CAPABILITY 0x48 -#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) -#define PCICFG_PM_CAPABILITY_CLOCK (1<<19) -#define PCICFG_PM_CAPABILITY_RESERVED (1<<20) -#define PCICFG_PM_CAPABILITY_DSI (1<<21) 
-#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) -#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) -#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) -#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27) -#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) -#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) -#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) -#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) -#define PCICFG_PM_CSR_OFFSET 0x4c -#define PCICFG_PM_CSR_STATE (0x3<<0) -#define PCICFG_PM_CSR_PME_ENABLE (1<<8) -#define PCICFG_PM_CSR_PME_STATUS (1<<15) -#define PCICFG_GRC_ADDRESS 0x78 -#define PCICFG_GRC_DATA 0x80 +#define PCICFG_INT_LINE 0x3c +#define PCICFG_INT_PIN 0x3d +#define PCICFG_PM_CSR_OFFSET 0x4c +#define PCICFG_GRC_ADDRESS 0x78 +#define PCICFG_GRC_DATA 0x80 #define PCICFG_DEVICE_CONTROL 0xb4 #define PCICFG_LINK_CONTROL 0xbc +#define PCICFG_COMMAND_IO_SPACE (1<<0) +#define PCICFG_COMMAND_MEM_SPACE (1<<1) +#define PCICFG_COMMAND_BUS_MASTER (1<<2) +#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) +#define PCICFG_COMMAND_MWI_CYCLES (1<<4) +#define PCICFG_COMMAND_VGA_SNOOP (1<<5) +#define PCICFG_COMMAND_PERR_ENA (1<<6) +#define PCICFG_COMMAND_STEPPING (1<<7) +#define PCICFG_COMMAND_SERR_ENA (1<<8) +#define PCICFG_COMMAND_FAST_B2B (1<<9) +#define PCICFG_COMMAND_INT_DISABLE (1<<10) +#define PCICFG_COMMAND_RESERVED (0x1f<<11) + +#define PCICFG_PM_CSR_STATE (0x3<<0) +#define PCICFG_PM_CSR_PME_STATUS (1<<15) #define BAR_USTRORM_INTMEM 0x400000 #define BAR_CSTRORM_INTMEM 0x410000 #define BAR_XSTRORM_INTMEM 0x420000 #define BAR_TSTRORM_INTMEM 0x430000 -/* for accessing the IGU in case of status block ACK */ #define BAR_IGU_INTMEM 0x440000 #define BAR_DOORBELL_OFFSET 0x800000 #define BAR_ME_REGISTER 0x450000 -/* config_2 offset */ -#define GRC_CONFIG_2_SIZE_REG 0x408 -#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) + +#define GRC_CONFIG_2_SIZE_REG 0x408 /* config_2 offset */ +#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) #define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) #define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) #define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) @@ -5253,11 +5213,11 @@ #define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) #define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) #define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) -#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) -#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) -#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) -#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) -#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) #define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) #define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) #define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) @@ -5274,44 +5234,46 @@ #define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) #define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) #define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) -#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) -#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) +#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) +#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) /* config_3 offset */ -#define GRC_CONFIG_3_SIZE_REG 0x40c -#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) -#define PCI_CONFIG_3_FORCE_PME (1L<<24) -#define PCI_CONFIG_3_PME_STATUS (1L<<25) -#define PCI_CONFIG_3_PME_ENABLE (1L<<26) -#define PCI_CONFIG_3_PM_STATE (0x3L<<27) -#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) -#define PCI_CONFIG_3_PCI_POWER (1L<<31) +#define GRC_CONFIG_3_SIZE_REG (0x40c) 
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define PCI_CONFIG_3_FORCE_PME (1L<<24) +#define PCI_CONFIG_3_PME_STATUS (1L<<25) +#define PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define PCI_CONFIG_3_PCI_POWER (1L<<31) -#define GRC_BAR2_CONFIG 0x4e0 -#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) -#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) -#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) +/* config_2 offset */ +#define GRC_CONFIG_2_SIZE_REG 0x408 -#define PCI_PM_DATA_A 0x410 -#define PCI_PM_DATA_B 0x414 -#define PCI_ID_VAL1 0x434 -#define PCI_ID_VAL2 0x438 +#define GRC_BAR2_CONFIG 0x4e0 +#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) +#define PCI_PM_DATA_A (0x410) +#define PCI_PM_DATA_B (0x414) +#define PCI_ID_VAL1 (0x434) +#define PCI_ID_VAL2 (0x438) #define MDIO_REG_BANK_CL73_IEEEB0 0x0 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 @@ -5560,8 +5522,6 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_PMA_REG_GEN_CTRL 0xca10 #define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 #define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a -#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12 -#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13 #define MDIO_PMA_REG_ROM_VER1 0xca19 #define MDIO_PMA_REG_ROM_VER2 0xca1a #define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b @@ -5616,8 +5576,7 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_AN_REG_LINK_STATUS 0x8304 #define MDIO_AN_REG_CL37_CL73 0x8370 #define MDIO_AN_REG_CL37_AN 0xffe0 -#define MDIO_AN_REG_CL37_FC_LD 0xffe4 -#define MDIO_AN_REG_CL37_FC_LP 0xffe5 +#define MDIO_AN_REG_CL37_FD 0xffe4 #define IGU_FUNC_BASE 0x0400 @@ -5641,13 +5600,4 @@ Theotherbitsarereservedandshouldbezero*/ #define IGU_INT_NOP 2 #define IGU_INT_NOP2 3 -#define COMMAND_REG_INT_ACK 0x0 -#define COMMAND_REG_PROD_UPD 0x4 -#define COMMAND_REG_ATTN_BITS_UPD 0x8 -#define COMMAND_REG_ATTN_BITS_SET 0xc -#define COMMAND_REG_ATTN_BITS_CLR 0x10 -#define COMMAND_REG_COALESCE_NOW 0x14 -#define COMMAND_REG_SIMD_MASK 0x18 -#define COMMAND_REG_SIMD_NOMASK 0x1c - diff --git a/trunk/drivers/net/cpmac.c b/trunk/drivers/net/cpmac.c index 
ec6b0af3d46b..a7800e559090 100644 --- a/trunk/drivers/net/cpmac.c +++ b/trunk/drivers/net/cpmac.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/net/e1000e/defines.h b/trunk/drivers/net/e1000e/defines.h index 14b0e6cd3b8d..f823b8ba5785 100644 --- a/trunk/drivers/net/e1000e/defines.h +++ b/trunk/drivers/net/e1000e/defines.h @@ -389,7 +389,7 @@ /* Interrupt Cause Set */ #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ /* Transmit Descriptor Control */ diff --git a/trunk/drivers/net/e1000e/e1000.h b/trunk/drivers/net/e1000e/e1000.h index ac4e506b4f88..cf57050d99d8 100644 --- a/trunk/drivers/net/e1000e/e1000.h +++ b/trunk/drivers/net/e1000e/e1000.h @@ -326,7 +326,6 @@ struct e1000_info { #define FLAG_RX_CSUM_ENABLED (1 << 28) #define FLAG_TSO_FORCE (1 << 29) #define FLAG_RX_RESTART_NOW (1 << 30) -#define FLAG_MSI_TEST_FAILED (1 << 31) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/trunk/drivers/net/e1000e/ethtool.c b/trunk/drivers/net/e1000e/ethtool.c index e21c9e0f3738..cf9679f2b7c4 100644 --- a/trunk/drivers/net/e1000e/ethtool.c +++ b/trunk/drivers/net/e1000e/ethtool.c @@ -177,7 +177,7 @@ static u32 e1000_get_link(struct net_device *netdev) u32 status; status = er32(STATUS); - return (status & E1000_STATUS_LU) ? 1 : 0; + return (status & E1000_STATUS_LU); } static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) diff --git a/trunk/drivers/net/e1000e/netdev.c b/trunk/drivers/net/e1000e/netdev.c index d266510c8a94..05b0b2f9c54b 100644 --- a/trunk/drivers/net/e1000e/netdev.c +++ b/trunk/drivers/net/e1000e/netdev.c @@ -510,12 +510,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, netdev_alloc_skb(netdev, length + NET_IP_ALIGN); if (new_skb) { skb_reserve(new_skb, NET_IP_ALIGN); - skb_copy_to_linear_data_offset(new_skb, - -NET_IP_ALIGN, - (skb->data - - NET_IP_ALIGN), - (length + - NET_IP_ALIGN)); + memcpy(new_skb->data - NET_IP_ALIGN, + skb->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); /* save the skb in buffer_info as good */ buffer_info->skb = skb; skb = new_skb; @@ -1236,36 +1233,26 @@ static irqreturn_t e1000_intr(int irq, void *data) return IRQ_HANDLED; } -/** - * e1000_request_irq - initialize interrupts - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. - **/ static int e1000_request_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; int irq_flags = IRQF_SHARED; int err; - if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) { - err = pci_enable_msi(adapter->pdev); - if (!err) { - adapter->flags |= FLAG_MSI_ENABLED; - irq_flags = 0; - } + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + handler = e1000_intr_msi; + irq_flags = 0; } - err = request_irq(adapter->pdev->irq, - ((adapter->flags & FLAG_MSI_ENABLED) ? - &e1000_intr_msi : &e1000_intr), - irq_flags, netdev->name, netdev); + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); if (err) { - if (adapter->flags & FLAG_MSI_ENABLED) { + e_err("Unable to allocate %s interrupt (return: %d)\n", + adapter->flags & FLAG_MSI_ENABLED ? 
"MSI":"INTx", err); + if (adapter->flags & FLAG_MSI_ENABLED) pci_disable_msi(adapter->pdev); - adapter->flags &= ~FLAG_MSI_ENABLED; - } - e_err("Unable to allocate interrupt, Error: %d\n", err); } return err; @@ -2604,135 +2591,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) return -ENOMEM; } -/** - * e1000_intr_msi_test - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t e1000_intr_msi_test(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - e_dbg("%s: icr is %08X\n", netdev->name, icr); - if (icr & E1000_ICR_RXSEQ) { - adapter->flags &= ~FLAG_MSI_TEST_FAILED; - wmb(); - } - - return IRQ_HANDLED; -} - -/** - * e1000_test_msi_interrupt - Returns 0 for successful test - * @adapter: board private struct - * - * code flow taken from tg3.c - **/ -static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - int err; - - /* poll_enable hasn't been called yet, so don't need disable */ - /* clear any pending events */ - er32(ICR); - - /* free the real vector and request a test handler */ - e1000_free_irq(adapter); - - /* Assume that the test fails, if it succeeds then the test - * MSI irq handler will unset this flag */ - adapter->flags |= FLAG_MSI_TEST_FAILED; - - err = pci_enable_msi(adapter->pdev); - if (err) - goto msi_test_failed; - - err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, - netdev->name, netdev); - if (err) { - pci_disable_msi(adapter->pdev); - goto msi_test_failed; - } - - wmb(); - - e1000_irq_enable(adapter); - - /* fire an unusual interrupt on the test handler */ - ew32(ICS, E1000_ICS_RXSEQ); - e1e_flush(); - msleep(50); - - e1000_irq_disable(adapter); - - rmb(); - - if (adapter->flags & FLAG_MSI_TEST_FAILED) { - err = -EIO; - e_info("MSI interrupt test failed!\n"); - } - - free_irq(adapter->pdev->irq, netdev); - pci_disable_msi(adapter->pdev); - - if (err == -EIO) - goto msi_test_failed; - - /* okay so the test worked, restore settings */ - e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); -msi_test_failed: - /* restore the original vector, even if it failed */ - e1000_request_irq(adapter); - return err; -} - -/** - * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored - * @adapter: board private struct - * - * code flow taken from tg3.c, called with e1000 interrupts disabled. - **/ -static int e1000_test_msi(struct e1000_adapter *adapter) -{ - int err; - u16 pci_cmd; - - if (!(adapter->flags & FLAG_MSI_ENABLED)) - return 0; - - /* disable SERR in case the MSI write causes a master abort */ - pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); - pci_write_config_word(adapter->pdev, PCI_COMMAND, - pci_cmd & ~PCI_COMMAND_SERR); - - err = e1000_test_msi_interrupt(adapter); - - /* restore previous setting of command word */ - pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); - - /* success ! 
*/ - if (!err) - return 0; - - /* EIO means MSI test failed */ - if (err != -EIO) - return err; - - /* back to INTx mode */ - e_warn("MSI interrupt test failed, using legacy interrupt.\n"); - - e1000_free_irq(adapter); - - err = e1000_request_irq(adapter); - - return err; -} - /** * e1000_open - Called when a network interface is made active * @netdev: network interface device structure @@ -2791,19 +2649,6 @@ static int e1000_open(struct net_device *netdev) if (err) goto err_req_irq; - /* - * Work around PCIe errata with MSI interrupts causing some chipsets to - * ignore e1000e MSI messages, which means we need to test our MSI - * interrupt now - */ - { - err = e1000_test_msi(adapter); - if (err) { - e_err("Interrupt allocation failed\n"); - goto err_req_irq; - } - } - /* From here on the code is the same as e1000e_up() */ clear_bit(__E1000_DOWN, &adapter->state); @@ -3210,7 +3055,7 @@ static void e1000_watchdog_task(struct work_struct *work) case SPEED_10: txb2b = 0; netdev->tx_queue_len = 10; - adapter->tx_timeout_factor = 16; + adapter->tx_timeout_factor = 14; break; case SPEED_100: txb2b = 0; @@ -3876,7 +3721,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) struct e1000_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { e_err("Invalid MTU setting\n"); return -EINVAL; diff --git a/trunk/drivers/net/e1000e/param.c b/trunk/drivers/net/e1000e/param.c index ed912e023a72..8effc3107f9a 100644 --- a/trunk/drivers/net/e1000e/param.c +++ b/trunk/drivers/net/e1000e/param.c @@ -324,27 +324,14 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) adapter->itr = 20000; break; default: + e1000_validate_option(&adapter->itr, &opt, + adapter); /* - * Save the setting, because the dynamic bits - * change itr. + * save the setting, because the dynamic bits + * change itr. clear the lower two bits + * because they are used as control */ - if (e1000_validate_option(&adapter->itr, &opt, - adapter) && - (adapter->itr == 3)) { - /* - * In case of invalid user value, - * default to conservative mode. - */ - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - } else { - /* - * Clear the lower two bits because - * they are used as control. 
- */ - adapter->itr_setting = - adapter->itr & ~3; - } + adapter->itr_setting = adapter->itr & ~3; break; } } else { diff --git a/trunk/drivers/net/gianfar.c b/trunk/drivers/net/gianfar.c index 999d69168277..ca6cf6ecb37b 100644 --- a/trunk/drivers/net/gianfar.c +++ b/trunk/drivers/net/gianfar.c @@ -134,7 +134,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int l static void gfar_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); void gfar_halt(struct net_device *dev); +#ifdef CONFIG_PM static void gfar_halt_nodisable(struct net_device *dev); +#endif void gfar_start(struct net_device *dev); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); @@ -629,6 +631,7 @@ static void init_registers(struct net_device *dev) } +#ifdef CONFIG_PM /* Halt the receive and transmit queues */ static void gfar_halt_nodisable(struct net_device *dev) { @@ -654,6 +657,7 @@ static void gfar_halt_nodisable(struct net_device *dev) cpu_relax(); } } +#endif /* Halt the receive and transmit queues */ void gfar_halt(struct net_device *dev) @@ -662,8 +666,6 @@ void gfar_halt(struct net_device *dev) struct gfar __iomem *regs = priv->regs; u32 tempval; - gfar_halt_nodisable(dev); - /* Disable Rx and Tx */ tempval = gfar_read(®s->maccfg1); tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); diff --git a/trunk/drivers/net/gianfar_sysfs.c b/trunk/drivers/net/gianfar_sysfs.c index 782c20170082..5116f68e01b9 100644 --- a/trunk/drivers/net/gianfar_sysfs.c +++ b/trunk/drivers/net/gianfar_sysfs.c @@ -33,6 +33,7 @@ #include #include +#include #include "gianfar.h" diff --git a/trunk/drivers/net/ipg.h b/trunk/drivers/net/ipg.h index dd9318f19497..e0e718ab4c2e 100644 --- a/trunk/drivers/net/ipg.h +++ b/trunk/drivers/net/ipg.h @@ -7,6 +7,7 @@ #ifndef __LINUX_IPG_H #define __LINUX_IPG_H +#include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include /* diff --git a/trunk/drivers/net/ixgbe/ixgbe_82598.c b/trunk/drivers/net/ixgbe/ixgbe_82598.c index f96358b641af..2f38e847e2cd 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_82598.c +++ b/trunk/drivers/net/ixgbe/ixgbe_82598.c @@ -190,7 +190,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598EB_CX4: - case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82598AT_DUAL_PORT: diff --git a/trunk/drivers/net/ixgbe/ixgbe_main.c b/trunk/drivers/net/ixgbe/ixgbe_main.c index 34bca16d48a6..e5f3da8468cc 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ixgbe/ixgbe_main.c @@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "1.3.18-k4" +#define DRV_VERSION "1.3.18-k2" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; @@ -72,8 +72,6 @@ static struct pci_device_id ixgbe_pci_tbl[] = { board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), - board_82598 }, /* required last entry */ {0, } diff --git a/trunk/drivers/net/ixgbe/ixgbe_type.h b/trunk/drivers/net/ixgbe/ixgbe_type.h index c0282a223df3..1ad7cb9c25a8 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_type.h +++ 
b/trunk/drivers/net/ixgbe/ixgbe_type.h @@ -39,7 +39,6 @@ #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 #define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 #define IXGBE_DEV_ID_82598EB_CX4 0x10DD -#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC /* General Registers */ #define IXGBE_CTRL 0x00000 diff --git a/trunk/drivers/net/loopback.c b/trunk/drivers/net/loopback.c index 3b43bfd85a0f..49f6bc036a92 100644 --- a/trunk/drivers/net/loopback.c +++ b/trunk/drivers/net/loopback.c @@ -64,6 +64,68 @@ struct pcpu_lstats { unsigned long bytes; }; +/* KISS: just allocate small chunks and copy bits. + * + * So, in fact, this is documentation, explaining what we expect + * of largesending device modulo TCP checksum, which is ignored for loopback. + */ + +#ifdef LOOPBACK_TSO +static void emulate_large_send_offload(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + + (iph->ihl * 4)); + unsigned int doffset = (iph->ihl + th->doff) * 4; + unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; + unsigned int offset = 0; + u32 seq = ntohl(th->seq); + u16 id = ntohs(iph->id); + + while (offset + doffset < skb->len) { + unsigned int frag_size = min(mtu, skb->len - offset) - doffset; + struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC); + + if (!nskb) + break; + skb_reserve(nskb, 32); + skb_set_mac_header(nskb, -ETH_HLEN); + skb_reset_network_header(nskb); + iph = ip_hdr(nskb); + skb_copy_to_linear_data(nskb, skb_network_header(skb), + doffset); + if (skb_copy_bits(skb, + doffset + offset, + nskb->data + doffset, + frag_size)) + BUG(); + skb_put(nskb, doffset + frag_size); + nskb->ip_summed = CHECKSUM_UNNECESSARY; + nskb->dev = skb->dev; + nskb->priority = skb->priority; + nskb->protocol = skb->protocol; + nskb->dst = dst_clone(skb->dst); + memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); + nskb->pkt_type = skb->pkt_type; + + th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4); + iph->tot_len = htons(frag_size + doffset); + iph->id = htons(id); + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl); + th->seq = htonl(seq); + if (offset + doffset + frag_size < skb->len) + th->fin = th->psh = 0; + netif_rx(nskb); + offset += frag_size; + seq += frag_size; + id++; + } + + dev_kfree_skb(skb); +} +#endif /* LOOPBACK_TSO */ + /* * The higher levels take care of making this non-reentrant (it's * called with bh's disabled). 
@@ -75,6 +137,9 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) skb_orphan(skb); skb->protocol = eth_type_trans(skb,dev); +#ifndef LOOPBACK_MUST_CHECKSUM + skb->ip_summed = CHECKSUM_UNNECESSARY; +#endif #ifdef LOOPBACK_TSO if (skb_is_gso(skb)) { @@ -169,7 +234,9 @@ static void loopback_setup(struct net_device *dev) dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ dev->flags = IFF_LOOPBACK; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST +#ifdef LOOPBACK_TSO | NETIF_F_TSO +#endif | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX diff --git a/trunk/drivers/net/myri10ge/myri10ge.c b/trunk/drivers/net/myri10ge/myri10ge.c index 5d76cd09e246..f1de38f8b742 100644 --- a/trunk/drivers/net/myri10ge/myri10ge.c +++ b/trunk/drivers/net/myri10ge/myri10ge.c @@ -3548,11 +3548,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp) /* try to load the slice aware rss firmware */ old_fw = mgp->fw_name; - if (myri10ge_fw_name != NULL) { - dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n", - myri10ge_fw_name); - mgp->fw_name = myri10ge_fw_name; - } else if (old_fw == myri10ge_fw_aligned) + if (old_fw == myri10ge_fw_aligned) mgp->fw_name = myri10ge_fw_rss_aligned; else mgp->fw_name = myri10ge_fw_rss_unaligned; diff --git a/trunk/drivers/net/ne.c b/trunk/drivers/net/ne.c index fa3ceca4e15c..42443d697423 100644 --- a/trunk/drivers/net/ne.c +++ b/trunk/drivers/net/ne.c @@ -118,7 +118,7 @@ bad_clone_list[] __initdata = { {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ -#ifdef CONFIG_MACH_TX49XX +#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ #endif {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ @@ -142,7 +142,7 @@ bad_clone_list[] __initdata = { #if defined(CONFIG_PLAT_MAPPI) # define DCR_VAL 0x4b #elif defined(CONFIG_PLAT_OAKS32R) || \ - defined(CONFIG_MACH_TX49XX) + defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) # define DCR_VAL 0x48 /* 8-bit mode */ #else # define DCR_VAL 0x49 diff --git a/trunk/drivers/net/netxen/netxen_nic.h b/trunk/drivers/net/netxen/netxen_nic.h index ab871df6b1db..93a7b9b668d5 100644 --- a/trunk/drivers/net/netxen/netxen_nic.h +++ b/trunk/drivers/net/netxen/netxen_nic.h @@ -66,8 +66,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 11 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.11" +#define _NETXEN_NIC_LINUX_SUBVERSION 0 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.0" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) @@ -1615,8 +1615,7 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter) int netxen_is_flash_supported(struct netxen_adapter *adapter); -int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); -int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]); extern void netxen_change_ringparam(struct netxen_adapter *adapter); extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); diff --git a/trunk/drivers/net/netxen/netxen_nic_hw.c b/trunk/drivers/net/netxen/netxen_nic_hw.c index 84978f80f396..9aa20f961618 100644 --- a/trunk/drivers/net/netxen/netxen_nic_hw.c +++ b/trunk/drivers/net/netxen/netxen_nic_hw.c @@ 
-733,56 +733,31 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, return 0; } -int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac) +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]) { - __le32 *pmac = (__le32 *) mac; - u32 offset; + __le32 *pmac = (__le32 *) & mac[0]; - offset = NETXEN_USER_START + - offsetof(struct netxen_new_user_info, mac_addr) + - adapter->portnum * sizeof(u64); - - if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) + if (netxen_get_flash_block(adapter, + NETXEN_USER_START + + offsetof(struct netxen_new_user_info, + mac_addr), + FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) { return -1; - + } if (*mac == cpu_to_le64(~0ULL)) { - - offset = NETXEN_USER_START_OLD + - offsetof(struct netxen_user_old_info, mac_addr) + - adapter->portnum * sizeof(u64); - if (netxen_get_flash_block(adapter, - offset, sizeof(u64), pmac) == -1) + NETXEN_USER_START_OLD + + offsetof(struct netxen_user_old_info, + mac_addr), + FLASH_NUM_PORTS * sizeof(u64), + pmac) == -1) return -1; - if (*mac == cpu_to_le64(~0ULL)) return -1; } return 0; } -int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac) -{ - uint32_t crbaddr, mac_hi, mac_lo; - int pci_func = adapter->ahw.pci_func; - - crbaddr = CRB_MAC_BLOCK_START + - (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); - - adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4); - adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4); - - mac_hi = cpu_to_le32(mac_hi); - mac_lo = cpu_to_le32(mac_lo); - - if (pci_func & 1) - *mac = ((mac_lo >> 16) | ((u64)mac_hi << 16)); - else - *mac = ((mac_lo) | ((u64)mac_hi << 32)); - - return 0; -} - #define CRB_WIN_LOCK_TIMEOUT 100000000 static int crb_win_lock(struct netxen_adapter *adapter) @@ -2208,10 +2183,10 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter) if (adapter->portnum == 0) { get_brd_name_by_type(board_info->board_type, brd_name); - printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n", - brd_name, serial_num, adapter->ahw.revision_id); - printk(KERN_INFO "NetXen Firmware version %d.%d.%d\n", - fw_major, fw_minor, fw_build); + printk("NetXen %s Board S/N %s Chip id 0x%x\n", + brd_name, serial_num, board_info->chip_id); + printk("NetXen Firmware version %d.%d.%d\n", fw_major, + fw_minor, fw_build); } if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) < diff --git a/trunk/drivers/net/netxen/netxen_nic_init.c b/trunk/drivers/net/netxen/netxen_nic_init.c index 5bba675d0504..519fc860e17e 100644 --- a/trunk/drivers/net/netxen/netxen_nic_init.c +++ b/trunk/drivers/net/netxen/netxen_nic_init.c @@ -1079,12 +1079,10 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter) void netxen_free_adapter_offload(struct netxen_adapter *adapter) { - int i = 100; - - if (!adapter->dummy_dma.addr) - return; + int i; - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (adapter->dummy_dma.addr) { + i = 100; do { if (dma_watchdog_shutdown_request(adapter) == 1) break; @@ -1092,17 +1090,17 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter) if (dma_watchdog_shutdown_poll_result(adapter) == 1) break; } while (--i); - } - if (i) { - pci_free_consistent(adapter->pdev, - NETXEN_HOST_DUMMY_DMA_SIZE, - adapter->dummy_dma.addr, - adapter->dummy_dma.phys_addr); - adapter->dummy_dma.addr = NULL; - } else { - printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", - adapter->netdev->name); + if (i) { + pci_free_consistent(adapter->pdev, + NETXEN_HOST_DUMMY_DMA_SIZE, + 
adapter->dummy_dma.addr, + adapter->dummy_dma.phys_addr); + adapter->dummy_dma.addr = NULL; + } else { + printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", + adapter->netdev->name); + } } } diff --git a/trunk/drivers/net/netxen/netxen_nic_main.c b/trunk/drivers/net/netxen/netxen_nic_main.c index 32bb47adbe39..7615c715e66e 100644 --- a/trunk/drivers/net/netxen/netxen_nic_main.c +++ b/trunk/drivers/net/netxen/netxen_nic_main.c @@ -149,18 +149,76 @@ static uint32_t msi_tgt_status[8] = { static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; -static inline void netxen_nic_disable_int(struct netxen_adapter *adapter) +static void netxen_nic_disable_int(struct netxen_adapter *adapter) { - adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0); + u32 mask = 0x7ff; + int retries = 32; + int pci_fn = adapter->ahw.pci_func; + + if (adapter->msi_mode != MSI_MODE_MULTIFUNC) + adapter->pci_write_normalize(adapter, + adapter->crb_intr_mask, 0); + + if (adapter->intr_scheme != -1 && + adapter->intr_scheme != INTR_SCHEME_PERPORT) + adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); + + if (!NETXEN_IS_MSI_FAMILY(adapter)) { + do { + adapter->pci_write_immediate(adapter, + adapter->legacy_intr.tgt_status_reg, + 0xffffffff); + mask = adapter->pci_read_immediate(adapter, + ISR_INT_VECTOR); + if (!(mask & 0x80)) + break; + udelay(10); + } while (--retries); + + if (!retries) { + printk(KERN_NOTICE "%s: Failed to disable interrupt\n", + netxen_nic_driver_name); + } + } else { + if (adapter->msi_mode == MSI_MODE_MULTIFUNC) { + adapter->pci_write_immediate(adapter, + msi_tgt_status[pci_fn], 0xffffffff); + } + } } -static inline void netxen_nic_enable_int(struct netxen_adapter *adapter) +static void netxen_nic_enable_int(struct netxen_adapter *adapter) { + u32 mask; + + if (adapter->intr_scheme != -1 && + adapter->intr_scheme != INTR_SCHEME_PERPORT) { + switch (adapter->ahw.board_type) { + case NETXEN_NIC_GBE: + mask = 0x77b; + break; + case NETXEN_NIC_XGBE: + mask = 0x77f; + break; + default: + mask = 0x7ff; + break; + } + + adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); + } + adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1); - if (!NETXEN_IS_MSI_FAMILY(adapter)) - adapter->pci_write_immediate(adapter, - adapter->legacy_intr.tgt_mask_reg, 0xfbff); + if (!NETXEN_IS_MSI_FAMILY(adapter)) { + mask = 0xbff; + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) + adapter->pci_write_immediate(adapter, + adapter->legacy_intr.tgt_mask_reg, mask); + else + adapter->pci_write_normalize(adapter, + CRB_INT_VECTOR, 0); + } } static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) @@ -443,44 +501,6 @@ static void netxen_init_msix_entries(struct netxen_adapter *adapter) adapter->msix_entries[i].entry = i; } -static int -netxen_read_mac_addr(struct netxen_adapter *adapter) -{ - int i; - unsigned char *p; - __le64 mac_addr; - DECLARE_MAC_BUF(mac); - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - if (netxen_is_flash_supported(adapter) != 0) - return -EIO; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) - return -EIO; - } else { - if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) - return -EIO; - } - - p = (unsigned char *)&mac_addr; - for (i = 0; i < 6; i++) - netdev->dev_addr[i] = *(p + 5 - i); - - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - - /* set station address */ - - if 
(!is_valid_ether_addr(netdev->perm_addr)) { - dev_warn(&pdev->dev, "Bad MAC address %s.\n", - print_mac(mac, netdev->dev_addr)); - } else - adapter->macaddr_set(adapter, netdev->dev_addr); - - return 0; -} - /* * netxen_nic_probe() * @@ -509,8 +529,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0; int i = 0, err; int first_driver, first_boot; + __le64 mac_addr[FLASH_NUM_PORTS + 1]; u32 val; int pci_func_id = PCI_FUNC(pdev->devfn); + DECLARE_MAC_BUF(mac); struct netxen_legacy_intr_set *legacy_intrp; uint8_t revision_id; @@ -523,13 +545,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } - if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { - printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x" - "will not be enabled.\n", - NX_P3_A0, NX_P3_B1); - return -ENODEV; - } - if ((err = pci_enable_device(pdev))) return err; @@ -883,14 +898,34 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_msi; init_timer(&adapter->watchdog_timer); + adapter->ahw.linkup = 0; adapter->watchdog_timer.function = &netxen_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); - err = netxen_read_mac_addr(adapter); - if (err) - dev_warn(&pdev->dev, "failed to read mac addr\n"); + if (netxen_is_flash_supported(adapter) == 0 && + netxen_get_flash_mac_addr(adapter, mac_addr) == 0) { + unsigned char *p; + + p = (unsigned char *)&mac_addr[adapter->portnum]; + netdev->dev_addr[0] = *(p + 5); + netdev->dev_addr[1] = *(p + 4); + netdev->dev_addr[2] = *(p + 3); + netdev->dev_addr[3] = *(p + 2); + netdev->dev_addr[4] = *(p + 1); + netdev->dev_addr[5] = *(p + 0); + + memcpy(netdev->perm_addr, netdev->dev_addr, + netdev->addr_len); + if (!is_valid_ether_addr(netdev->perm_addr)) { + printk(KERN_ERR "%s: Bad MAC address %s.\n", + netxen_nic_driver_name, + print_mac(mac, netdev->dev_addr)); + } else { + adapter->macaddr_set(adapter, netdev->dev_addr); + } + } netif_carrier_off(netdev); netif_stop_queue(netdev); @@ -965,7 +1000,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { netxen_free_hw_resources(adapter); - netxen_release_rx_buffers(adapter); netxen_free_sw_resources(adapter); } @@ -1035,15 +1069,6 @@ static int netxen_nic_open(struct net_device *netdev) goto err_out_free_sw; } - if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) || - (adapter->intr_scheme != INTR_SCHEME_PERPORT)) { - printk(KERN_ERR "%s: Firmware interrupt scheme is " - "incompatible with driver\n", - netdev->name); - adapter->driver_mismatch = 1; - goto err_out_free_hw; - } - if (adapter->fw_major < 4) { adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum]; @@ -1069,7 +1094,7 @@ static int netxen_nic_open(struct net_device *netdev) flags, netdev->name, adapter); if (err) { printk(KERN_ERR "request_irq failed with: %d\n", err); - goto err_out_free_rxbuf; + goto err_out_free_hw; } adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; @@ -1091,7 +1116,6 @@ static int netxen_nic_open(struct net_device *netdev) if (adapter->set_mtu) adapter->set_mtu(adapter, netdev->mtu); - adapter->ahw.linkup = 0; mod_timer(&adapter->watchdog_timer, jiffies); napi_enable(&adapter->napi); @@ -1103,8 +1127,6 @@ static int netxen_nic_open(struct net_device *netdev) err_out_free_irq: 
free_irq(adapter->irq, adapter); -err_out_free_rxbuf: - netxen_release_rx_buffers(adapter); err_out_free_hw: netxen_free_hw_resources(adapter); err_out_free_sw: @@ -1130,8 +1152,10 @@ static int netxen_nic_close(struct net_device *netdev) netxen_release_tx_buffers(adapter); - FLUSH_SCHEDULED_WORK(); - del_timer_sync(&adapter->watchdog_timer); + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { + FLUSH_SCHEDULED_WORK(); + del_timer_sync(&adapter->watchdog_timer); + } return 0; } @@ -1434,8 +1458,7 @@ void netxen_watchdog_task(struct work_struct *work) netxen_nic_handle_phy_intr(adapter); - if (netif_running(adapter->netdev)) - mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); } static void netxen_tx_timeout(struct net_device *netdev) @@ -1495,9 +1518,18 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) return stats; } +static inline void +netxen_handle_int(struct netxen_adapter *adapter) +{ + netxen_nic_disable_int(adapter); + napi_schedule(&adapter->napi); +} + static irqreturn_t netxen_intr(int irq, void *data) { struct netxen_adapter *adapter = data; + u32 our_int = 0; + u32 status = 0; status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); @@ -1512,32 +1544,22 @@ static irqreturn_t netxen_intr(int irq, void *data) if (!ISR_LEGACY_INT_TRIGGERED(status)) return IRQ_NONE; - } else { - unsigned long our_int = 0; + } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); - /* not our interrupt */ - if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) + if ((our_int & (0x80 << adapter->portnum)) == 0) return IRQ_NONE; - /* claim interrupt */ - adapter->pci_write_normalize(adapter, - CRB_INT_VECTOR, (our_int & 0xffffffff)); + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { + /* claim interrupt */ + adapter->pci_write_normalize(adapter, + CRB_INT_VECTOR, + our_int & ~((u32)(0x80 << adapter->portnum))); + } } - /* clear interrupt */ - if (adapter->fw_major < 4) - netxen_nic_disable_int(adapter); - - adapter->pci_write_immediate(adapter, - adapter->legacy_intr.tgt_status_reg, - 0xffffffff); - /* read twice to ensure write is flushed */ - adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); - adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); - - napi_schedule(&adapter->napi); + netxen_handle_int(adapter); return IRQ_HANDLED; } @@ -1546,11 +1568,7 @@ static irqreturn_t netxen_msi_intr(int irq, void *data) { struct netxen_adapter *adapter = data; - /* clear interrupt */ - adapter->pci_write_immediate(adapter, - msi_tgt_status[adapter->ahw.pci_func], 0xffffffff); - - napi_schedule(&adapter->napi); + netxen_handle_int(adapter); return IRQ_HANDLED; } diff --git a/trunk/drivers/net/netxen/netxen_nic_phan_reg.h b/trunk/drivers/net/netxen/netxen_nic_phan_reg.h index b293adcc95ab..83e5ee57bfef 100644 --- a/trunk/drivers/net/netxen/netxen_nic_phan_reg.h +++ b/trunk/drivers/net/netxen/netxen_nic_phan_reg.h @@ -125,8 +125,6 @@ #define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4) #define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8) -#define CRB_MAC_BLOCK_START NETXEN_CAM_RAM(0x1c0) - /* * capabilities register, can be used to selectively enable/disable features * for backward compability diff --git a/trunk/drivers/net/ppp_mppe.c b/trunk/drivers/net/ppp_mppe.c index 88f03c9e9403..b35d79449500 100644 --- a/trunk/drivers/net/ppp_mppe.c +++ b/trunk/drivers/net/ppp_mppe.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include 
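For reference, the interrupt rework in the netxen hunks above (netxen_handle_int plus the slimmed-down netxen_intr/netxen_msi_intr handlers) follows the usual mask-then-poll NAPI pattern: the hard IRQ handler only claims the interrupt, masks it at the device and schedules the NAPI poll, which later drains the rings and unmasks. The fragment below is a minimal illustrative sketch of that pattern, not code from the driver; the example_* names and the adapter struct are hypothetical stand-ins, and only napi_schedule() and the irqreturn codes are real kernel API, used here the same way the patch uses them.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical adapter; a real driver keeps its register state here. */
struct example_adapter {
	struct napi_struct napi;
};

static irqreturn_t example_intr(int irq, void *data)
{
	struct example_adapter *adapter = data;

	if (!example_irq_is_ours(adapter))	/* shared legacy INTx line */
		return IRQ_NONE;

	example_disable_device_irq(adapter);	/* mask at the device ... */
	napi_schedule(&adapter->napi);		/* ... and let the NAPI poll
						 * routine do the work and
						 * unmask when it finishes */
	return IRQ_HANDLED;
}
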
diff --git a/trunk/drivers/net/pppol2tp.c b/trunk/drivers/net/pppol2tp.c index ff175e8f36b2..f9298827a76c 100644 --- a/trunk/drivers/net/pppol2tp.c +++ b/trunk/drivers/net/pppol2tp.c @@ -61,6 +61,7 @@ */ #include +#include #include #include #include diff --git a/trunk/drivers/net/r6040.c b/trunk/drivers/net/r6040.c index 5d86281d9363..6531ff565c54 100644 --- a/trunk/drivers/net/r6040.c +++ b/trunk/drivers/net/r6040.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include diff --git a/trunk/drivers/net/sh_eth.c b/trunk/drivers/net/sh_eth.c index 1c370e6aa641..25e62cf58d3a 100644 --- a/trunk/drivers/net/sh_eth.c +++ b/trunk/drivers/net/sh_eth.c @@ -20,6 +20,7 @@ * the file called "COPYING". */ +#include #include #include #include diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c index e24b25ca1c69..7d29edcd40b4 100644 --- a/trunk/drivers/net/sky2.c +++ b/trunk/drivers/net/sky2.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -665,16 +666,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) if (hw->chip_id != CHIP_ID_YUKON_EC) { if (hw->chip_id == CHIP_ID_YUKON_EC_U) { - /* select page 2 to access MAC control register */ - gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); - ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + /* enable Power Down */ ctrl |= PHY_M_PC_POW_D_ENA; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); - - /* set page register back to 0 */ - gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } /* set IEEE compatible Power Down Mode (dev. #4.99) */ diff --git a/trunk/drivers/net/tehuti.h b/trunk/drivers/net/tehuti.h index 7db48f1cd949..c66dfc9ec1ec 100644 --- a/trunk/drivers/net/tehuti.h +++ b/trunk/drivers/net/tehuti.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index 71d2c5cfdad9..d2439b85a790 100644 --- a/trunk/drivers/net/tg3.c +++ b/trunk/drivers/net/tg3.c @@ -66,8 +66,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.94" -#define DRV_MODULE_RELDATE "August 14, 2008" +#define DRV_MODULE_VERSION "3.93" +#define DRV_MODULE_RELDATE "May 22, 2008" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -536,7 +536,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) return 0; switch (locknum) { - case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: break; default: @@ -574,7 +573,6 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) return; switch (locknum) { - case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: break; default: @@ -1019,44 +1017,16 @@ static void tg3_mdio_fini(struct tg3 *tp) } } -/* tp->lock is held. */ -static inline void tg3_generate_fw_event(struct tg3 *tp) -{ - u32 val; - - val = tr32(GRC_RX_CPU_EVENT); - val |= GRC_RX_CPU_DRIVER_EVENT; - tw32_f(GRC_RX_CPU_EVENT, val); - - tp->last_event_jiffies = jiffies; -} - -#define TG3_FW_EVENT_TIMEOUT_USEC 2500 - /* tp->lock is held. */ static void tg3_wait_for_event_ack(struct tg3 *tp) { int i; - unsigned int delay_cnt; - long time_remain; - - /* If enough time has passed, no wait is necessary. */ - time_remain = (long)(tp->last_event_jiffies + 1 + - usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - - (long)jiffies; - if (time_remain < 0) - return; - /* Check if we can shorten the wait time. 
*/ - delay_cnt = jiffies_to_usecs(time_remain); - if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) - delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; - delay_cnt = (delay_cnt >> 3) + 1; - - for (i = 0; i < delay_cnt; i++) { + /* Wait for up to 2.5 milliseconds */ + for (i = 0; i < 250000; i++) { if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) break; - udelay(8); + udelay(10); } } @@ -1105,7 +1075,9 @@ static void tg3_ump_link_report(struct tg3 *tp) val = 0; tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); - tg3_generate_fw_event(tp); + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); } static void tg3_link_report(struct tg3 *tp) @@ -2152,13 +2124,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { - mac_mode |= tp->mac_mode & - (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); - if (mac_mode & MAC_MODE_APE_TX_EN) - mac_mode |= MAC_MODE_TDE_ENABLE; - } - tw32_f(MAC_MODE, mac_mode); udelay(100); @@ -5528,7 +5493,7 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event) return; apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); - if (!(apedata & APE_FW_STATUS_READY)) + if (apedata != APE_FW_STATUS_READY) return; /* Wait for up to 1 millisecond for APE to service previous event. */ @@ -5795,8 +5760,6 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_mdio_stop(tp); - tg3_ape_lock(tp, TG3_APE_LOCK_GRC); - /* No matching tg3_nvram_unlock() after this because * chip reset below will undo the nvram lock. */ @@ -5945,19 +5908,12 @@ static int tg3_chip_reset(struct tg3 *tp) } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_GMII; tw32_f(MAC_MODE, tp->mac_mode); - } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { - tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); - if (tp->mac_mode & MAC_MODE_APE_TX_EN) - tp->mac_mode |= MAC_MODE_TDE_ENABLE; - tw32_f(MAC_MODE, tp->mac_mode); } else tw32_f(MAC_MODE, 0); udelay(40); tg3_mdio_start(tp); - tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); - err = tg3_poll_fw(tp); if (err) return err; @@ -5979,7 +5935,6 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; - tp->last_event_jiffies = jiffies; if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; } @@ -5993,12 +5948,15 @@ static void tg3_stop_fw(struct tg3 *tp) { if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { + u32 val; + /* Wait for RX cpu to ACK the previous event. */ tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); - - tg3_generate_fw_event(tp); + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32(GRC_RX_CPU_EVENT, val); /* Wait for RX cpu to ACK this event. 
*/ tg3_wait_for_event_ack(tp); @@ -7448,11 +7406,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(10); } - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) - tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; - else - tp->mac_mode = 0; - tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | + tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && @@ -7886,8 +7840,9 @@ static void tg3_timer(unsigned long __opaque) * resets. */ if (!--tp->asf_counter) { - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { + if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + u32 val; + tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, @@ -7895,8 +7850,9 @@ static void tg3_timer(unsigned long __opaque) tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); /* 5 seconds timeout */ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); - - tg3_generate_fw_event(tp); + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); } tp->asf_counter = tp->asf_multiplier; } @@ -8466,11 +8422,6 @@ static inline unsigned long get_stat64(tg3_stat64_t *val) return ret; } -static inline u64 get_estat64(tg3_stat64_t *val) -{ - return ((u64)val->high << 32) | ((u64)val->low); -} - static unsigned long calc_crc_errors(struct tg3 *tp) { struct tg3_hw_stats *hw_stats = tp->hw_stats; @@ -8499,7 +8450,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp) #define ESTAT_ADD(member) \ estats->member = old_estats->member + \ - get_estat64(&hw_stats->member) + get_stat64(&hw_stats->member) static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) { @@ -12465,13 +12416,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->misc_host_ctrl); } - /* Preserve the APE MAC_MODE bits */ - if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) - tp->mac_mode = tr32(MAC_MODE) | - MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; - else - tp->mac_mode = TG3_DEF_MAC_MODE; - /* these are limited to 10/100 only */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || @@ -13331,6 +13275,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->pdev = pdev; tp->dev = dev; tp->pm_cap = pm_cap; + tp->mac_mode = TG3_DEF_MAC_MODE; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; diff --git a/trunk/drivers/net/tg3.h b/trunk/drivers/net/tg3.h index f5b8cab8d4b5..df07842172b7 100644 --- a/trunk/drivers/net/tg3.h +++ b/trunk/drivers/net/tg3.h @@ -325,8 +325,6 @@ #define MAC_MODE_TDE_ENABLE 0x00200000 #define MAC_MODE_RDE_ENABLE 0x00400000 #define MAC_MODE_FHDE_ENABLE 0x00800000 -#define MAC_MODE_APE_RX_EN 0x08000000 -#define MAC_MODE_APE_TX_EN 0x10000000 #define MAC_STATUS 0x00000404 #define MAC_STATUS_PCS_SYNCED 0x00000001 #define MAC_STATUS_SIGNAL_DET 0x00000002 @@ -1891,7 +1889,6 @@ #define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 /* APE convenience enumerations. 
*/ -#define TG3_APE_LOCK_GRC 1 #define TG3_APE_LOCK_MEM 4 #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 @@ -2432,10 +2429,7 @@ struct tg3 { struct tg3_ethtool_stats estats; struct tg3_ethtool_stats estats_prev; - union { unsigned long phy_crc_errors; - unsigned long last_event_jiffies; - }; u32 rx_offset; u32 tg3_flags; diff --git a/trunk/drivers/net/tlan.c b/trunk/drivers/net/tlan.c index ec871f646766..85246ed7cb9c 100644 --- a/trunk/drivers/net/tlan.c +++ b/trunk/drivers/net/tlan.c @@ -360,8 +360,8 @@ TLan_GetSKB( const struct tlan_list_tag *tag) { unsigned long addr; - addr = tag->buffer[9].address; - addr |= (tag->buffer[8].address << 16) << 16; + addr = tag->buffer[8].address; + addr |= (tag->buffer[9].address << 16) << 16; return (struct sk_buff *) addr; } @@ -1984,6 +1984,7 @@ static void TLan_ResetLists( struct net_device *dev ) TLanList *list; dma_addr_t list_phys; struct sk_buff *skb; + void *t = NULL; priv->txHead = 0; priv->txTail = 0; @@ -2021,8 +2022,7 @@ static void TLan_ResetLists( struct net_device *dev ) } skb_reserve( skb, NET_IP_ALIGN ); - list->buffer[0].address = pci_map_single(priv->pciDev, - skb->data, + list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); TLan_StoreSKB(list, skb); diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c index 6daea0c91862..e6bbc639c2d0 100644 --- a/trunk/drivers/net/tun.c +++ b/trunk/drivers/net/tun.c @@ -358,66 +358,6 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) return mask; } -/* prepad is the amount to reserve at front. len is length after that. - * linear is a hint as to how much to copy (usually headers). */ -static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear, - gfp_t gfp) -{ - struct sk_buff *skb; - unsigned int i; - - skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN); - if (skb) { - skb_reserve(skb, prepad); - skb_put(skb, len); - return skb; - } - - /* Under a page? Don't bother with paged skb. */ - if (prepad + len < PAGE_SIZE) - return NULL; - - /* Start with a normal skb, and add pages. */ - skb = alloc_skb(prepad + linear, gfp); - if (!skb) - return NULL; - - skb_reserve(skb, prepad); - skb_put(skb, linear); - - len -= linear; - - for (i = 0; i < MAX_SKB_FRAGS; i++) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - - f->page = alloc_page(gfp|__GFP_ZERO); - if (!f->page) - break; - - f->page_offset = 0; - f->size = PAGE_SIZE; - - skb->data_len += PAGE_SIZE; - skb->len += PAGE_SIZE; - skb->truesize += PAGE_SIZE; - skb_shinfo(skb)->nr_frags++; - - if (len < PAGE_SIZE) { - len = 0; - break; - } - len -= PAGE_SIZE; - } - - /* Too large, or alloc fail? 
*/ - if (unlikely(len)) { - kfree_skb(skb); - skb = NULL; - } - - return skb; -} - /* Get packet from user space buffer */ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) { @@ -451,12 +391,14 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, return -EINVAL; } - if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) { + if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { tun->dev->stats.rx_dropped++; return -ENOMEM; } - if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) { + if (align) + skb_reserve(skb, align); + if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { tun->dev->stats.rx_dropped++; kfree_skb(skb); return -EFAULT; @@ -806,36 +748,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) return err; } -static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) -{ - struct tun_struct *tun = file->private_data; - - if (!tun) - return -EBADFD; - - DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); - - strcpy(ifr->ifr_name, tun->dev->name); - - ifr->ifr_flags = 0; - - if (ifr->ifr_flags & TUN_TUN_DEV) - ifr->ifr_flags |= IFF_TUN; - else - ifr->ifr_flags |= IFF_TAP; - - if (tun->flags & TUN_NO_PI) - ifr->ifr_flags |= IFF_NO_PI; - - if (tun->flags & TUN_ONE_QUEUE) - ifr->ifr_flags |= IFF_ONE_QUEUE; - - if (tun->flags & TUN_VNET_HDR) - ifr->ifr_flags |= IFF_VNET_HDR; - - return 0; -} - /* This is like a cut-down ethtool ops, except done via tun fd so no * privs required. */ static int set_offload(struct net_device *dev, unsigned long arg) @@ -921,15 +833,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); switch (cmd) { - case TUNGETIFF: - ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); - if (ret) - return ret; - - if (copy_to_user(argp, &ifr, sizeof(ifr))) - return -EFAULT; - break; - case TUNSETNOCSUM: /* Disable/Enable checksum */ if (arg) diff --git a/trunk/drivers/net/typhoon.c b/trunk/drivers/net/typhoon.c index 734ce0977f02..8549f1159a30 100644 --- a/trunk/drivers/net/typhoon.c +++ b/trunk/drivers/net/typhoon.c @@ -128,6 +128,7 @@ static const int multicast_filter_limit = 32; #include #include #include +#include #include #include "typhoon.h" diff --git a/trunk/drivers/net/usb/Kconfig b/trunk/drivers/net/usb/Kconfig index 0973b6e37024..68e198bd538b 100644 --- a/trunk/drivers/net/usb/Kconfig +++ b/trunk/drivers/net/usb/Kconfig @@ -154,6 +154,17 @@ config USB_NET_AX8817X This driver creates an interface named "ethX", where X depends on what other networking devices you have in use. +config USB_HSO + tristate "Option USB High Speed Mobile Devices" + depends on USB && RFKILL + default n + help + Choose this option if you have an Option HSDPA/HSUPA card. + These cards support downlink speeds of 7.2Mbps or greater. + + To compile this driver as a module, choose M here: the + module will be called hso. + config USB_NET_CDCETHER tristate "CDC Ethernet support (smart devices such as cable modems)" depends on USB_USBNET @@ -326,15 +337,5 @@ config USB_NET_ZAURUS really need this non-conformant variant of CDC Ethernet (or in some cases CDC MDLM) protocol, not "g_ether". -config USB_HSO - tristate "Option USB High Speed Mobile Devices" - depends on USB && RFKILL - default n - help - Choose this option if you have an Option HSDPA/HSUPA card. - These cards support downlink speeds of 7.2Mbps or greater. 
- - To compile this driver as a module, choose M here: the - module will be called hso. endmenu diff --git a/trunk/drivers/net/usb/hso.c b/trunk/drivers/net/usb/hso.c index 1b7cac77159e..031d07b105af 100644 --- a/trunk/drivers/net/usb/hso.c +++ b/trunk/drivers/net/usb/hso.c @@ -102,12 +102,8 @@ #define MAX_RX_URBS 2 -static inline struct hso_serial *get_serial_by_tty(struct tty_struct *tty) -{ - if (tty) - return tty->driver_data; - return NULL; -} +#define get_serial_by_tty(x) \ + (x ? (struct hso_serial *)x->driver_data : NULL) /*****************************************************************************/ /* Debugging functions */ @@ -298,25 +294,24 @@ static int hso_get_activity(struct hso_device *hso_dev); /* #define DEBUG */ -static inline struct hso_net *dev2net(struct hso_device *hso_dev) -{ - return hso_dev->port_data.dev_net; -} - -static inline struct hso_serial *dev2ser(struct hso_device *hso_dev) -{ - return hso_dev->port_data.dev_serial; -} +#define dev2net(x) (x->port_data.dev_net) +#define dev2ser(x) (x->port_data.dev_serial) /* Debugging functions */ #ifdef DEBUG static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, unsigned int len) { - static char name[255]; + u8 i = 0; - sprintf(name, "hso[%d:%s]", line_count, func_name); - print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len); + printk(KERN_DEBUG "[%d:%s]: len %d", line_count, func_name, len); + + for (i = 0; i < len; i++) { + if (!(i % 16)) + printk("\n 0x%03x: ", i); + printk("%02x ", (unsigned char)buf[i]); + } + printk("\n"); } #define DUMP(buf_, len_) \ @@ -533,12 +528,13 @@ static struct hso_serial *get_serial_by_shared_int_and_type( static struct hso_serial *get_serial_by_index(unsigned index) { - struct hso_serial *serial = NULL; + struct hso_serial *serial; unsigned long flags; + if (!serial_table[index]) + return NULL; spin_lock_irqsave(&serial_table_lock, flags); - if (serial_table[index]) - serial = dev2ser(serial_table[index]); + serial = dev2ser(serial_table[index]); spin_unlock_irqrestore(&serial_table_lock, flags); return serial; @@ -565,7 +561,6 @@ static int get_free_serial_index(void) static void set_serial_by_index(unsigned index, struct hso_serial *serial) { unsigned long flags; - spin_lock_irqsave(&serial_table_lock, flags); if (serial) serial_table[index] = serial->parent; @@ -574,7 +569,7 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial) spin_unlock_irqrestore(&serial_table_lock, flags); } -/* log a meaningful explanation of an USB status */ +/* log a meaningfull explanation of an USB status */ static void log_usb_status(int status, const char *function) { char *explanation; @@ -1108,8 +1103,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) /* reset the rts and dtr */ /* do the actual close */ serial->open_count--; - kref_put(&serial->parent->ref, hso_serial_ref_free); if (serial->open_count <= 0) { + kref_put(&serial->parent->ref, hso_serial_ref_free); serial->open_count = 0; if (serial->tty) { serial->tty->driver_data = NULL; @@ -1472,8 +1467,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) return; } hso_put_activity(serial->parent); - if (serial->tty) - tty_wakeup(serial->tty); + tty_wakeup(serial->tty); hso_kick_transmit(serial); D1(" "); @@ -1544,8 +1538,7 @@ static void ctrl_callback(struct urb *urb) clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags); } else { hso_put_activity(serial->parent); - if (serial->tty) - tty_wakeup(serial->tty); + tty_wakeup(serial->tty); /* response to a write 
command */ hso_kick_transmit(serial); } @@ -2659,7 +2652,7 @@ static void hso_free_interface(struct usb_interface *interface) hso_stop_net_device(network_table[i]); cancel_work_sync(&network_table[i]->async_put_intf); cancel_work_sync(&network_table[i]->async_get_intf); - if (rfk) + if(rfk) rfkill_unregister(rfk); hso_free_net_device(network_table[i]); } @@ -2730,7 +2723,7 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int, } /* operations setup of the serial interface */ -static const struct tty_operations hso_serial_ops = { +static struct tty_operations hso_serial_ops = { .open = hso_serial_open, .close = hso_serial_close, .write = hso_serial_write, diff --git a/trunk/drivers/net/wireless/ath5k/base.c b/trunk/drivers/net/wireless/ath5k/base.c index b20a45aa8680..2028866f5995 100644 --- a/trunk/drivers/net/wireless/ath5k/base.c +++ b/trunk/drivers/net/wireless/ath5k/base.c @@ -40,6 +40,7 @@ * */ +#include #include #include #include @@ -586,6 +587,7 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state) ath5k_stop_hw(sc); free_irq(pdev->irq, sc); + pci_disable_msi(pdev); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); @@ -614,10 +616,12 @@ ath5k_pci_resume(struct pci_dev *pdev) */ pci_write_config_byte(pdev, 0x41, 0); + pci_enable_msi(pdev); + err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); if (err) { ATH5K_ERR(sc, "request_irq failed\n"); - goto err_no_irq; + goto err_msi; } err = ath5k_init(sc); @@ -638,7 +642,8 @@ ath5k_pci_resume(struct pci_dev *pdev) return 0; err_irq: free_irq(pdev->irq, sc); -err_no_irq: +err_msi: + pci_disable_msi(pdev); pci_disable_device(pdev); return err; } diff --git a/trunk/drivers/net/wireless/ath9k/hw.c b/trunk/drivers/net/wireless/ath9k/hw.c index a17eb130f574..bde162f128ab 100644 --- a/trunk/drivers/net/wireless/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath9k/hw.c @@ -5017,11 +5017,7 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah, for (i = 0; i < 123; i++) { if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { - - /* workaround for gcc bug #37014 */ - volatile int tmp = abs(cur_vit_mask - bin); - - if (tmp < 75) + if ((abs(cur_vit_mask - bin)) < 75) mask_amt = 1; else mask_amt = 0; diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c index 7205a936ec74..3bf3a869361f 100644 --- a/trunk/drivers/net/wireless/b43/main.c +++ b/trunk/drivers/net/wireless/b43/main.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -4614,9 +4615,7 @@ static void b43_sprom_fixup(struct ssb_bus *bus) if (bus->bustype == SSB_BUSTYPE_PCI) { pdev = bus->host_pci; if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || - IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) || IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || - IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) || IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013)) bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; } diff --git a/trunk/drivers/net/wireless/ipw2100.c b/trunk/drivers/net/wireless/ipw2100.c index 19a401c4a0dc..c6f886ec08a3 100644 --- a/trunk/drivers/net/wireless/ipw2100.c +++ b/trunk/drivers/net/wireless/ipw2100.c @@ -157,6 +157,7 @@ that only one external action is invoked at a time. 
#include #include #include +#include #include #include #include diff --git a/trunk/drivers/net/wireless/ipw2200.c b/trunk/drivers/net/wireless/ipw2200.c index dcce3542d5a7..36e8d2f6e7b4 100644 --- a/trunk/drivers/net/wireless/ipw2200.c +++ b/trunk/drivers/net/wireless/ipw2200.c @@ -31,6 +31,7 @@ ******************************************************************************/ #include "ipw2200.h" +#include #ifndef KBUILD_EXTMOD diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c b/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c index 3f51f3635344..b3931f6135a4 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c b/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c index e2581229d8b2..22bb26985c2e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -966,7 +967,7 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, s = iwl4965_get_sub_band(priv, channel); if (s >= EEPROM_TX_POWER_BANDS) { - IWL_ERROR("Tx Power can not find channel %d\n", channel); + IWL_ERROR("Tx Power can not find channel %d ", channel); return -1; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c index cbc01a00eaf4..f3d139b663e6 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c index 061ffba9c884..ed09e48b1b61 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -29,6 +29,7 @@ #include #include +#include #include #include #include diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c index c72f72579bea..9bd61809129f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c @@ -28,6 +28,7 @@ #include #include +#include #include struct iwl_priv; /* FIXME: remove */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 37155755efc5..bce53830b301 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -63,6 +63,7 @@ #include #include +#include #include #include @@ -145,7 +146,7 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv) { u32 gp = iwl_read32(priv, CSR_EEPROM_GP); if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { - IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); return -ENOENT; } return 0; @@ -226,7 +227,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); if (ret < 0) { - IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); ret = -ENOENT; goto err; } @@ -253,7 +254,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) } if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { - IWL_ERROR("Time out reading EEPROM[%d]\n", addr); + IWL_ERROR("Time out reading EEPROM[%d]", addr); ret = -ETIMEDOUT; goto done; } diff --git 
a/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c index 2eb03eea1908..6512834bb916 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-hcmd.c @@ -28,6 +28,7 @@ #include #include +#include #include #include "iwl-dev.h" /* FIXME: remove */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-power.c b/trunk/drivers/net/wireless/iwlwifi/iwl-power.c index a099c9e30e55..028e3053c0ca 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-power.c @@ -29,6 +29,7 @@ #include #include +#include #include #include diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-sta.c b/trunk/drivers/net/wireless/iwlwifi/iwl-sta.c index 6283a3a707f5..60a6e0106036 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -207,7 +207,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, case WLAN_HT_CAP_MIMO_PS_DISABLED: break; default: - IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode); + IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); break; } @@ -969,7 +969,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) return priv->hw_params.bcast_sta_id; default: - IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); + IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); return priv->hw_params.bcast_sta_id; } } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c index d82823b5c8ab..4108c7c8f00f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -493,7 +493,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv) /* Alloc keep-warm buffer */ ret = iwl_kw_alloc(priv); if (ret) { - IWL_ERROR("Keep Warm allocation failed\n"); + IWL_ERROR("Keep Warm allocation failed"); goto error_kw; } spin_lock_irqsave(&priv->lock, flags); @@ -1463,7 +1463,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); if (scd_flow >= priv->hw_params.max_txq_num) { - IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n"); + IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); return; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c b/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c index b775d5bab668..444847ab1b5a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -1557,7 +1558,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv) BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { - IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); return -ENOENT; } @@ -1582,7 +1583,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv) } if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { - IWL_ERROR("Time out reading EEPROM[%d]\n", addr); + IWL_ERROR("Time out reading EEPROM[%d]", addr); return -ETIMEDOUT; } e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); @@ -2506,7 +2507,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h return priv->hw_setting.bcast_sta_id; default: - IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); + IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); 
return priv->hw_setting.bcast_sta_id; } } diff --git a/trunk/drivers/net/wireless/p54/p54common.c b/trunk/drivers/net/wireless/p54/p54common.c index 29be3dc8ee09..83cd85e1f847 100644 --- a/trunk/drivers/net/wireless/p54/p54common.c +++ b/trunk/drivers/net/wireless/p54/p54common.c @@ -413,12 +413,12 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb) last_addr = range->end_addr; __skb_unlink(entry, &priv->tx_queue); memset(&info->status, 0, sizeof(info->status)); + priv->tx_stats[skb_get_queue_mapping(skb)].len--; entry_hdr = (struct p54_control_hdr *) entry->data; entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) pad = entry_data->align[0]; - priv->tx_stats[entry_data->hw_queue - 4].len--; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { if (!(payload->status & 0x01)) info->flags |= IEEE80211_TX_STAT_ACK; @@ -557,7 +557,6 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) struct p54_tx_control_allocdata *txhdr; size_t padding, len; u8 rate; - u8 cts_rate = 0x20; current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)]; if (unlikely(current_queue->len > current_queue->limit)) @@ -582,28 +581,28 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1); hdr->retry1 = hdr->retry2 = info->control.retry_limit; + memset(txhdr->wep_key, 0x0, 16); + txhdr->padding = 0; + txhdr->padding2 = 0; + /* TODO: add support for alternate retry TX rates */ rate = ieee80211_get_tx_rate(dev, info)->hw_value; - if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) { + if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) rate |= 0x10; - cts_rate |= 0x10; - } - if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { + if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) rate |= 0x40; - cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; - } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { + else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) rate |= 0x20; - cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; - } memset(txhdr->rateset, rate, 8); - txhdr->key_type = 0; - txhdr->key_len = 0; - txhdr->hw_queue = skb_get_queue_mapping(skb) + 4; - txhdr->tx_antenna = (info->antenna_sel_tx == 0) ? + txhdr->wep_key_present = 0; + txhdr->wep_key_len = 0; + txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4); + txhdr->magic4 = 0; + txhdr->antenna = (info->antenna_sel_tx == 0) ? 2 : info->antenna_sel_tx - 1; txhdr->output_power = 0x7f; // HW Maximum - txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? - 0 : cts_rate; + txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? + 0 : ((rate > 0x3) ? 
cpu_to_le32(0x33) : cpu_to_le32(0x23)); if (padding) txhdr->align[0] = padding; @@ -837,21 +836,10 @@ static int p54_start(struct ieee80211_hw *dev) struct p54_common *priv = dev->priv; int err; - if (!priv->cached_vdcf) { - priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf)+ - priv->tx_hdr_len + sizeof(struct p54_control_hdr), - GFP_KERNEL); - - if (!priv->cached_vdcf) - return -ENOMEM; - } - err = priv->open(dev); if (!err) priv->mode = IEEE80211_IF_TYPE_MNTR; - p54_init_vdcf(dev); - return err; } @@ -1031,6 +1019,15 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len) dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + sizeof(struct p54_tx_control_allocdata); + priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) + + priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL); + + if (!priv->cached_vdcf) { + ieee80211_free_hw(dev); + return NULL; + } + + p54_init_vdcf(dev); mutex_init(&priv->conf_mutex); return dev; diff --git a/trunk/drivers/net/wireless/p54/p54common.h b/trunk/drivers/net/wireless/p54/p54common.h index 8db6c0e8e540..2245fcce92dc 100644 --- a/trunk/drivers/net/wireless/p54/p54common.h +++ b/trunk/drivers/net/wireless/p54/p54common.h @@ -183,16 +183,16 @@ struct p54_frame_sent_hdr { struct p54_tx_control_allocdata { u8 rateset[8]; - u8 unalloc0[2]; - u8 key_type; - u8 key_len; - u8 key[16]; - u8 hw_queue; - u8 unalloc1[9]; - u8 tx_antenna; + u16 padding; + u8 wep_key_present; + u8 wep_key_len; + u8 wep_key[16]; + __le32 frame_type; + u32 padding2; + __le16 magic4; + u8 antenna; u8 output_power; - u8 cts_rate; - u8 unalloc2[3]; + __le32 magic5; u8 align[0]; } __attribute__ ((packed)); diff --git a/trunk/drivers/net/wireless/p54/p54usb.c b/trunk/drivers/net/wireless/p54/p54usb.c index cbaca23a9453..815c095ef797 100644 --- a/trunk/drivers/net/wireless/p54/p54usb.c +++ b/trunk/drivers/net/wireless/p54/p54usb.c @@ -109,17 +109,7 @@ static void p54u_rx_cb(struct urb *urb) urb->context = skb; skb_queue_tail(&priv->rx_queue, skb); } else { - if (!priv->hw_type) - skb_push(skb, sizeof(struct net2280_tx_hdr)); - - skb_reset_tail_pointer(skb); skb_trim(skb, 0); - if (urb->transfer_buffer != skb_tail_pointer(skb)) { - /* this should not happen */ - WARN_ON(1); - urb->transfer_buffer = skb_tail_pointer(skb); - } - skb_queue_tail(&priv->rx_queue, skb); } diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00queue.h b/trunk/drivers/net/wireless/rt2x00/rt2x00queue.h index ff78e52ce43c..a4a8c57004db 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00queue.h +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00queue.h @@ -173,10 +173,10 @@ struct rxdone_entry_desc { * frame transmission failed due to excessive retries. */ enum txdone_entry_desc_flags { - TXDONE_UNKNOWN, - TXDONE_SUCCESS, - TXDONE_FAILURE, - TXDONE_EXCESSIVE_RETRY, + TXDONE_UNKNOWN = 1 << 0, + TXDONE_SUCCESS = 1 << 1, + TXDONE_FAILURE = 1 << 2, + TXDONE_EXCESSIVE_RETRY = 1 << 3, }; /** diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c b/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c index 2050227ea530..8d76bb2e0312 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -181,7 +181,6 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) * (Only indirectly by looking at the failed TX counters * in the register). 
*/ - txdesc.flags = 0; if (!urb->status) __set_bit(TXDONE_UNKNOWN, &txdesc.flags); else diff --git a/trunk/drivers/net/wireless/rtl8187_dev.c b/trunk/drivers/net/wireless/rtl8187_dev.c index ca5deb6244e6..57376fb993ed 100644 --- a/trunk/drivers/net/wireless/rtl8187_dev.c +++ b/trunk/drivers/net/wireless/rtl8187_dev.c @@ -40,7 +40,6 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { /* Netgear */ {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187}, {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187}, - {USB_DEVICE(0x0846, 0x4260), .driver_info = DEVICE_RTL8187B}, /* HP */ {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, /* Sitecom */ diff --git a/trunk/drivers/sbus/sbus.c b/trunk/drivers/sbus/sbus.c index 9c129248466c..73a86d09bba8 100644 --- a/trunk/drivers/sbus/sbus.c +++ b/trunk/drivers/sbus/sbus.c @@ -7,13 +7,13 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include diff --git a/trunk/drivers/serial/sunhv.c b/trunk/drivers/serial/sunhv.c index e41766d08035..aeeec5588afd 100644 --- a/trunk/drivers/serial/sunhv.c +++ b/trunk/drivers/serial/sunhv.c @@ -17,11 +17,11 @@ #include #include #include -#include #include #include #include +#include #include #if defined(CONFIG_MAGIC_SYSRQ) diff --git a/trunk/drivers/serial/sunsab.c b/trunk/drivers/serial/sunsab.c index 29b4458abf74..15ee497e1c78 100644 --- a/trunk/drivers/serial/sunsab.c +++ b/trunk/drivers/serial/sunsab.c @@ -32,11 +32,11 @@ #include #include #include -#include #include #include #include +#include #if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ diff --git a/trunk/drivers/serial/sunsu.c b/trunk/drivers/serial/sunsu.c index a378464f9292..e24e68235088 100644 --- a/trunk/drivers/serial/sunsu.c +++ b/trunk/drivers/serial/sunsu.c @@ -35,11 +35,11 @@ #include #include #include -#include #include #include #include +#include #if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ diff --git a/trunk/drivers/serial/sunzilog.c b/trunk/drivers/serial/sunzilog.c index 3cb4c8aee13f..0f3d69b86d67 100644 --- a/trunk/drivers/serial/sunzilog.c +++ b/trunk/drivers/serial/sunzilog.c @@ -32,11 +32,11 @@ #include #endif #include -#include #include #include #include +#include #if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ diff --git a/trunk/drivers/ssb/main.c b/trunk/drivers/ssb/main.c index 87ab2443e66d..d831a2beff39 100644 --- a/trunk/drivers/ssb/main.c +++ b/trunk/drivers/ssb/main.c @@ -1165,19 +1165,15 @@ EXPORT_SYMBOL(ssb_dma_translation); int ssb_dma_set_mask(struct ssb_device *dev, u64 mask) { -#ifdef CONFIG_SSB_PCIHOST int err; -#endif switch (dev->bus->bustype) { case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST err = pci_set_dma_mask(dev->bus->host_pci, mask); if (err) return err; err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask); return err; -#endif case SSB_BUSTYPE_SSB: return dma_set_mask(dev->dev, mask); default: @@ -1192,7 +1188,6 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, { switch (dev->bus->bustype) { case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST if (gfp_flags & GFP_DMA) { /* Workaround: The PCI API does not support passing * a GFP flag. 
*/ @@ -1200,7 +1195,6 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, size, dma_handle, gfp_flags); } return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle); -#endif case SSB_BUSTYPE_SSB: return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags); default: @@ -1216,7 +1210,6 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, { switch (dev->bus->bustype) { case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST if (gfp_flags & GFP_DMA) { /* Workaround: The PCI API does not support passing * a GFP flag. */ @@ -1227,7 +1220,6 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, pci_free_consistent(dev->bus->host_pci, size, vaddr, dma_handle); return; -#endif case SSB_BUSTYPE_SSB: dma_free_coherent(dev->dev, size, vaddr, dma_handle); return; diff --git a/trunk/drivers/usb/Kconfig b/trunk/drivers/usb/Kconfig index bcefbddeba50..755823cdf62a 100644 --- a/trunk/drivers/usb/Kconfig +++ b/trunk/drivers/usb/Kconfig @@ -95,18 +95,16 @@ config USB source "drivers/usb/core/Kconfig" -source "drivers/usb/mon/Kconfig" - source "drivers/usb/host/Kconfig" -source "drivers/usb/musb/Kconfig" - source "drivers/usb/class/Kconfig" source "drivers/usb/storage/Kconfig" source "drivers/usb/image/Kconfig" +source "drivers/usb/mon/Kconfig" + comment "USB port drivers" depends on USB diff --git a/trunk/drivers/usb/atm/cxacru.c b/trunk/drivers/usb/atm/cxacru.c index 9aea43a8c4ad..507a9bd0d77c 100644 --- a/trunk/drivers/usb/atm/cxacru.c +++ b/trunk/drivers/usb/atm/cxacru.c @@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ offd = le32_to_cpu(buf[offb++]); if (offd >= size) { if (printk_ratelimit()) - usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n", + usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n", offd, cm); ret = -EIO; goto cleanup; diff --git a/trunk/drivers/usb/class/cdc-acm.c b/trunk/drivers/usb/class/cdc-acm.c index efc4373ededb..0725b1871f23 100644 --- a/trunk/drivers/usb/class/cdc-acm.c +++ b/trunk/drivers/usb/class/cdc-acm.c @@ -51,7 +51,6 @@ */ #undef DEBUG -#undef VERBOSE_DEBUG #include #include @@ -71,9 +70,6 @@ #include "cdc-acm.h" - -#define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */ - /* * Version Information */ @@ -89,12 +85,6 @@ static DEFINE_MUTEX(open_mutex); #define ACM_READY(acm) (acm && acm->dev && acm->used) -#ifdef VERBOSE_DEBUG -#define verbose 1 -#else -#define verbose 0 -#endif - /* * Functions for ACM control messages. */ @@ -146,17 +136,19 @@ static int acm_wb_alloc(struct acm *acm) static int acm_wb_is_avail(struct acm *acm) { int i, n; - unsigned long flags; n = ACM_NW; - spin_lock_irqsave(&acm->write_lock, flags); for (i = 0; i < ACM_NW; i++) { n -= acm->wb[i].use; } - spin_unlock_irqrestore(&acm->write_lock, flags); return n; } +static inline int acm_wb_is_used(struct acm *acm, int wbn) +{ + return acm->wb[wbn].use; +} + /* * Finish write. 
*/ @@ -165,6 +157,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb) unsigned long flags; spin_lock_irqsave(&acm->write_lock, flags); + acm->write_ready = 1; wb->use = 0; acm->transmitting--; spin_unlock_irqrestore(&acm->write_lock, flags); @@ -197,25 +190,40 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb) static int acm_write_start(struct acm *acm, int wbn) { unsigned long flags; - struct acm_wb *wb = &acm->wb[wbn]; + struct acm_wb *wb; int rc; spin_lock_irqsave(&acm->write_lock, flags); if (!acm->dev) { - wb->use = 0; spin_unlock_irqrestore(&acm->write_lock, flags); return -ENODEV; } + if (!acm->write_ready) { + spin_unlock_irqrestore(&acm->write_lock, flags); + return 0; /* A white lie */ + } + + wb = &acm->wb[wbn]; + if(acm_wb_is_avail(acm) <= 1) + acm->write_ready = 0; + dbg("%s susp_count: %d", __func__, acm->susp_count); if (acm->susp_count) { + acm->old_ready = acm->write_ready; acm->delayed_wb = wb; + acm->write_ready = 0; schedule_work(&acm->waker); spin_unlock_irqrestore(&acm->write_lock, flags); return 0; /* A white lie */ } usb_mark_last_busy(acm->dev); + if (!acm_wb_is_used(acm, wbn)) { + spin_unlock_irqrestore(&acm->write_lock, flags); + return 0; + } + rc = acm_start_wb(acm, wb); spin_unlock_irqrestore(&acm->write_lock, flags); @@ -480,28 +488,22 @@ static void acm_rx_tasklet(unsigned long _acm) /* data interface wrote those outgoing bytes */ static void acm_write_bulk(struct urb *urb) { + struct acm *acm; struct acm_wb *wb = urb->context; - struct acm *acm = wb->instance; - if (verbose || urb->status - || (urb->actual_length != urb->transfer_buffer_length)) - dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n", - urb->actual_length, - urb->transfer_buffer_length, - urb->status); + dbg("Entering acm_write_bulk with status %d", urb->status); + acm = wb->instance; acm_write_done(acm, wb); if (ACM_READY(acm)) schedule_work(&acm->work); - else - wake_up_interruptible(&acm->drain_wait); } static void acm_softint(struct work_struct *work) { struct acm *acm = container_of(work, struct acm, work); - - dev_vdbg(&acm->data->dev, "tx work\n"); + dbg("Entering acm_softint."); + if (!ACM_READY(acm)) return; tty_wakeup(acm->tty); @@ -510,6 +512,7 @@ static void acm_softint(struct work_struct *work) static void acm_waker(struct work_struct *waker) { struct acm *acm = container_of(waker, struct acm, waker); + long flags; int rv; rv = usb_autopm_get_interface(acm->control); @@ -521,6 +524,9 @@ static void acm_waker(struct work_struct *waker) acm_start_wb(acm, acm->delayed_wb); acm->delayed_wb = NULL; } + spin_lock_irqsave(&acm->write_lock, flags); + acm->write_ready = acm->old_ready; + spin_unlock_irqrestore(&acm->write_lock, flags); usb_autopm_put_interface(acm->control); } @@ -622,8 +628,6 @@ static void acm_tty_unregister(struct acm *acm) kfree(acm); } -static int acm_tty_chars_in_buffer(struct tty_struct *tty); - static void acm_tty_close(struct tty_struct *tty, struct file *filp) { struct acm *acm = tty->driver_data; @@ -638,13 +642,6 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) if (acm->dev) { usb_autopm_get_interface(acm->control); acm_set_control(acm, acm->ctrlout = 0); - - /* try letting the last writes drain naturally */ - wait_event_interruptible_timeout(acm->drain_wait, - (ACM_NW == acm_wb_is_avail(acm)) - || !acm->dev, - ACM_CLOSE_TIMEOUT * HZ); - usb_kill_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_kill_urb(acm->wb[i].urb); @@ -700,7 +697,7 @@ static int acm_tty_write_room(struct tty_struct *tty) * Do not let 
the line discipline to know that we have a reserve, * or it might get too enthusiastic. */ - return acm_wb_is_avail(acm) ? acm->writesize : 0; + return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0; } static int acm_tty_chars_in_buffer(struct tty_struct *tty) @@ -1075,11 +1072,11 @@ static int acm_probe (struct usb_interface *intf, acm->urb_task.data = (unsigned long) acm; INIT_WORK(&acm->work, acm_softint); INIT_WORK(&acm->waker, acm_waker); - init_waitqueue_head(&acm->drain_wait); spin_lock_init(&acm->throttle_lock); spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); mutex_init(&acm->mutex); + acm->write_ready = 1; acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); @@ -1111,11 +1108,9 @@ static int acm_probe (struct usb_interface *intf, rcv->instance = acm; } for (i = 0; i < num_rx_buf; i++) { - struct acm_rb *rb = &(acm->rb[i]); + struct acm_rb *buf = &(acm->rb[i]); - rb->base = usb_buffer_alloc(acm->dev, readsize, - GFP_KERNEL, &rb->dma); - if (!rb->base) { + if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) { dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); goto alloc_fail7; } @@ -1177,7 +1172,6 @@ static int acm_probe (struct usb_interface *intf, acm_set_line(acm, &acm->line); usb_driver_claim_interface(&acm_driver, data_interface, acm); - usb_set_intfdata(data_interface, acm); usb_get_intf(control_interface); tty_register_device(acm_tty_driver, minor, &control_interface->dev); @@ -1227,11 +1221,11 @@ static void acm_disconnect(struct usb_interface *intf) struct acm *acm = usb_get_intfdata(intf); struct usb_device *usb_dev = interface_to_usbdev(intf); - /* sibling interface is already cleaning up */ - if (!acm) - return; - mutex_lock(&open_mutex); + if (!acm || !acm->dev) { + mutex_unlock(&open_mutex); + return; + } if (acm->country_codes){ device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); diff --git a/trunk/drivers/usb/class/cdc-acm.h b/trunk/drivers/usb/class/cdc-acm.h index 1f95e7aa1b66..85c3aaaab7c5 100644 --- a/trunk/drivers/usb/class/cdc-acm.h +++ b/trunk/drivers/usb/class/cdc-acm.h @@ -106,6 +106,8 @@ struct acm { struct list_head spare_read_bufs; struct list_head filled_read_bufs; int write_used; /* number of non-empty write buffers */ + int write_ready; /* write urb is not running */ + int old_ready; int processing; int transmitting; spinlock_t write_lock; @@ -113,7 +115,6 @@ struct acm { struct usb_cdc_line_coding line; /* bits, stop, parity */ struct work_struct work; /* work queue entry for line discipline waking up */ struct work_struct waker; - wait_queue_head_t drain_wait; /* close processing */ struct tasklet_struct urb_task; /* rx processing */ spinlock_t throttle_lock; /* synchronize throtteling and read callback */ unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ diff --git a/trunk/drivers/usb/core/driver.c b/trunk/drivers/usb/core/driver.c index 2be37fe466f2..ddb54e14a5c5 100644 --- a/trunk/drivers/usb/core/driver.c +++ b/trunk/drivers/usb/core/driver.c @@ -774,6 +774,7 @@ void usb_deregister(struct usb_driver *driver) } EXPORT_SYMBOL_GPL(usb_deregister); + /* Forced unbinding of a USB interface driver, either because * it doesn't support pre_reset/post_reset/reset_resume or * because it doesn't support suspend/resume. 
@@ -820,8 +821,6 @@ void usb_rebind_intf(struct usb_interface *intf) dev_warn(&intf->dev, "rebind failed: %d\n", rc); } -#ifdef CONFIG_PM - #define DO_UNBIND 0 #define DO_REBIND 1 @@ -873,6 +872,8 @@ static void do_unbind_rebind(struct usb_device *udev, int action) } } +#ifdef CONFIG_PM + /* Caller has locked udev's pm_mutex */ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) { diff --git a/trunk/drivers/usb/core/message.c b/trunk/drivers/usb/core/message.c index 286b4431a097..586d6f1376cf 100644 --- a/trunk/drivers/usb/core/message.c +++ b/trunk/drivers/usb/core/message.c @@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) continue; dev_dbg(&dev->dev, "unregistering interface %s\n", dev_name(&interface->dev)); - usb_remove_sysfs_intf_files(interface); device_del(&interface->dev); + usb_remove_sysfs_intf_files(interface); } /* Now that the interfaces are unbound, nobody should diff --git a/trunk/drivers/usb/gadget/Kconfig b/trunk/drivers/usb/gadget/Kconfig index acc95b2ac6f8..c6a8c6b1116a 100644 --- a/trunk/drivers/usb/gadget/Kconfig +++ b/trunk/drivers/usb/gadget/Kconfig @@ -284,16 +284,6 @@ config USB_LH7A40X default USB_GADGET select USB_GADGET_SELECTED -# built in ../musb along with host support -config USB_GADGET_MUSB_HDRC - boolean "Inventra HDRC USB Peripheral (TI, ...)" - depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) - select USB_GADGET_DUALSPEED - select USB_GADGET_SELECTED - help - This OTG-capable silicon IP is used in dual designs including - the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010. - config USB_GADGET_OMAP boolean "OMAP USB Device Controller" depends on ARCH_OMAP diff --git a/trunk/drivers/usb/gadget/dummy_hcd.c b/trunk/drivers/usb/gadget/dummy_hcd.c index 7600a0c78753..21d1406af9ee 100644 --- a/trunk/drivers/usb/gadget/dummy_hcd.c +++ b/trunk/drivers/usb/gadget/dummy_hcd.c @@ -542,14 +542,13 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req, req->req.context = dum; req->req.complete = fifo_complete; - list_add_tail(&req->queue, &ep->queue); spin_unlock (&dum->lock); _req->actual = _req->length; _req->status = 0; _req->complete (_ep, _req); spin_lock (&dum->lock); - } else - list_add_tail(&req->queue, &ep->queue); + } + list_add_tail (&req->queue, &ep->queue); spin_unlock_irqrestore (&dum->lock, flags); /* real hardware would likely enable transfers here, in case diff --git a/trunk/drivers/usb/gadget/f_acm.c b/trunk/drivers/usb/gadget/f_acm.c index 5ee1590b8e9c..d8faccf27895 100644 --- a/trunk/drivers/usb/gadget/f_acm.c +++ b/trunk/drivers/usb/gadget/f_acm.c @@ -47,37 +47,18 @@ struct f_acm { u8 ctrl_id, data_id; u8 port_num; - u8 pending; - - /* lock is mostly for pending and notify_req ... they get accessed - * by callbacks both from tty (open/close/break) under its spinlock, - * and notify_req.complete() which can't use that lock. 
- */ - spinlock_t lock; - + struct usb_descriptor_header **fs_function; struct acm_ep_descs fs; + struct usb_descriptor_header **hs_function; struct acm_ep_descs hs; struct usb_ep *notify; struct usb_endpoint_descriptor *notify_desc; - struct usb_request *notify_req; struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ - - /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */ u16 port_handshake_bits; -#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */ -#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */ - - /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */ - u16 serial_state; -#define ACM_CTRL_OVERRUN (1 << 6) -#define ACM_CTRL_PARITY (1 << 5) -#define ACM_CTRL_FRAMING (1 << 4) -#define ACM_CTRL_RI (1 << 3) -#define ACM_CTRL_BRK (1 << 2) -#define ACM_CTRL_DSR (1 << 1) -#define ACM_CTRL_DCD (1 << 0) +#define RS232_RTS (1 << 1) /* unused with full duplex */ +#define RS232_DTR (1 << 0) /* host is ready for data r/w */ }; static inline struct f_acm *func_to_acm(struct usb_function *f) @@ -85,17 +66,12 @@ static inline struct f_acm *func_to_acm(struct usb_function *f) return container_of(f, struct f_acm, port.func); } -static inline struct f_acm *port_to_acm(struct gserial *p) -{ - return container_of(p, struct f_acm, port); -} - /*-------------------------------------------------------------------------*/ /* notification endpoint uses smallish and infrequent fixed-size messages */ #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ -#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */ +#define GS_NOTIFY_MAXPACKET 8 /* interface and class descriptors: */ @@ -141,7 +117,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = { .bLength = sizeof(acm_descriptor), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_ACM_TYPE, - .bmCapabilities = USB_CDC_CAP_LINE, + .bmCapabilities = (1 << 1), }; static struct usb_cdc_union_desc acm_union_desc __initdata = { @@ -301,11 +277,6 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* composite driver infrastructure handles everything except * CDC class messages; interface activation uses set_alt(). - * - * Note CDC spec table 4 lists the ACM request profile. It requires - * encapsulated command support ... we don't handle any, and respond - * to them by stalling. Options include get/set/clear comm features - * (not that useful) and SEND_BREAK. */ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { @@ -341,7 +312,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) value = 0; /* FIXME we should not allow data to flow until the - * host sets the ACM_CTRL_DTR bit; and when it clears + * host sets the RS232_DTR bit; and when it clears * that bit, we should return to that no-flow state. */ acm->port_handshake_bits = w_value; @@ -379,6 +350,9 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) /* we know alt == 0, so this is an activation or a reset */ if (intf == acm->ctrl_id) { + /* REVISIT this may need more work when we start to + * send notifications ... 
+ */ if (acm->notify->driver_data) { VDBG(cdev, "reset acm control interface %d\n", intf); usb_ep_disable(acm->notify); @@ -423,128 +397,6 @@ static void acm_disable(struct usb_function *f) /*-------------------------------------------------------------------------*/ -/** - * acm_cdc_notify - issue CDC notification to host - * @acm: wraps host to be notified - * @type: notification type - * @value: Refer to cdc specs, wValue field. - * @data: data to be sent - * @length: size of data - * Context: irqs blocked, acm->lock held, acm_notify_req non-null - * - * Returns zero on sucess or a negative errno. - * - * See section 6.3.5 of the CDC 1.1 specification for information - * about the only notification we issue: SerialState change. - */ -static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value, - void *data, unsigned length) -{ - struct usb_ep *ep = acm->notify; - struct usb_request *req; - struct usb_cdc_notification *notify; - const unsigned len = sizeof(*notify) + length; - void *buf; - int status; - - req = acm->notify_req; - acm->notify_req = NULL; - acm->pending = false; - - req->length = len; - notify = req->buf; - buf = notify + 1; - - notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS - | USB_RECIP_INTERFACE; - notify->bNotificationType = type; - notify->wValue = cpu_to_le16(value); - notify->wIndex = cpu_to_le16(acm->ctrl_id); - notify->wLength = cpu_to_le16(length); - memcpy(buf, data, length); - - status = usb_ep_queue(ep, req, GFP_ATOMIC); - if (status < 0) { - ERROR(acm->port.func.config->cdev, - "acm ttyGS%d can't notify serial state, %d\n", - acm->port_num, status); - acm->notify_req = req; - } - - return status; -} - -static int acm_notify_serial_state(struct f_acm *acm) -{ - struct usb_composite_dev *cdev = acm->port.func.config->cdev; - int status; - - spin_lock(&acm->lock); - if (acm->notify_req) { - DBG(cdev, "acm ttyGS%d serial state %04x\n", - acm->port_num, acm->serial_state); - status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, - 0, &acm->serial_state, sizeof(acm->serial_state)); - } else { - acm->pending = true; - status = 0; - } - spin_unlock(&acm->lock); - return status; -} - -static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req) -{ - struct f_acm *acm = req->context; - u8 doit = false; - - /* on this call path we do NOT hold the port spinlock, - * which is why ACM needs its own spinlock - */ - spin_lock(&acm->lock); - if (req->status != -ESHUTDOWN) - doit = acm->pending; - acm->notify_req = req; - spin_unlock(&acm->lock); - - if (doit) - acm_notify_serial_state(acm); -} - -/* connect == the TTY link is open */ - -static void acm_connect(struct gserial *port) -{ - struct f_acm *acm = port_to_acm(port); - - acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD; - acm_notify_serial_state(acm); -} - -static void acm_disconnect(struct gserial *port) -{ - struct f_acm *acm = port_to_acm(port); - - acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD); - acm_notify_serial_state(acm); -} - -static int acm_send_break(struct gserial *port, int duration) -{ - struct f_acm *acm = port_to_acm(port); - u16 state; - - state = acm->serial_state; - state &= ~ACM_CTRL_BRK; - if (duration) - state |= ACM_CTRL_BRK; - - acm->serial_state = state; - return acm_notify_serial_state(acm); -} - -/*-------------------------------------------------------------------------*/ - /* ACM function driver setup/binding */ static int __init acm_bind(struct usb_configuration *c, struct usb_function *f) @@ -593,20 +445,8 @@ acm_bind(struct usb_configuration *c, 
struct usb_function *f) acm->notify = ep; ep->driver_data = cdev; /* claim */ - /* allocate notification */ - acm->notify_req = gs_alloc_req(ep, - sizeof(struct usb_cdc_notification) + 2, - GFP_KERNEL); - if (!acm->notify_req) - goto fail; - - acm->notify_req->complete = acm_cdc_notify_complete; - acm->notify_req->context = acm; - /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(acm_fs_function); - if (!f->descriptors) - goto fail; acm->fs.in = usb_find_endpoint(acm_fs_function, f->descriptors, &acm_fs_in_desc); @@ -638,6 +478,8 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) f->hs_descriptors, &acm_hs_notify_desc); } + /* FIXME provide a callback for triggering notifications */ + DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", acm->port_num, gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", @@ -646,9 +488,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: - if (acm->notify_req) - gs_free_req(acm->notify, acm->notify_req); - /* we might as well release our claims on endpoints */ if (acm->notify) acm->notify->driver_data = NULL; @@ -665,13 +504,10 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) static void acm_unbind(struct usb_configuration *c, struct usb_function *f) { - struct f_acm *acm = func_to_acm(f); - if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); - gs_free_req(acm->notify, acm->notify_req); - kfree(acm); + kfree(func_to_acm(f)); } /* Some controllers can't support CDC ACM ... */ @@ -735,14 +571,8 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num) if (!acm) return -ENOMEM; - spin_lock_init(&acm->lock); - acm->port_num = port_num; - acm->port.connect = acm_connect; - acm->port.disconnect = acm_disconnect; - acm->port.send_break = acm_send_break; - acm->port.func.name = "acm"; acm->port.func.strings = acm_strings; /* descriptors are per-instance copies */ diff --git a/trunk/drivers/usb/gadget/f_ecm.c b/trunk/drivers/usb/gadget/f_ecm.c index a2b5c092bda0..0822e9d7693a 100644 --- a/trunk/drivers/usb/gadget/f_ecm.c +++ b/trunk/drivers/usb/gadget/f_ecm.c @@ -63,7 +63,9 @@ struct f_ecm { char ethaddr[14]; + struct usb_descriptor_header **fs_function; struct ecm_ep_descs fs; + struct usb_descriptor_header **hs_function; struct ecm_ep_descs hs; struct usb_ep *notify; diff --git a/trunk/drivers/usb/gadget/f_rndis.c b/trunk/drivers/usb/gadget/f_rndis.c index 659b3d9671c4..61652f0f13fd 100644 --- a/trunk/drivers/usb/gadget/f_rndis.c +++ b/trunk/drivers/usb/gadget/f_rndis.c @@ -85,7 +85,9 @@ struct f_rndis { u8 ethaddr[ETH_ALEN]; int config; + struct usb_descriptor_header **fs_function; struct rndis_ep_descs fs; + struct usb_descriptor_header **hs_function; struct rndis_ep_descs hs; struct usb_ep *notify; diff --git a/trunk/drivers/usb/gadget/f_serial.c b/trunk/drivers/usb/gadget/f_serial.c index fe5674db344b..1b6bde9aaed5 100644 --- a/trunk/drivers/usb/gadget/f_serial.c +++ b/trunk/drivers/usb/gadget/f_serial.c @@ -36,7 +36,9 @@ struct f_gser { u8 data_id; u8 port_num; + struct usb_descriptor_header **fs_function; struct gser_descs fs; + struct usb_descriptor_header **hs_function; struct gser_descs hs; }; diff --git a/trunk/drivers/usb/gadget/f_subset.c b/trunk/drivers/usb/gadget/f_subset.c index acb8d233aa1d..afeab9a0523f 100644 --- a/trunk/drivers/usb/gadget/f_subset.c +++ b/trunk/drivers/usb/gadget/f_subset.c @@ -66,7 +66,9 @@ struct f_gether { char ethaddr[14]; + struct 
usb_descriptor_header **fs_function; struct geth_descs fs; + struct usb_descriptor_header **hs_function; struct geth_descs hs; }; diff --git a/trunk/drivers/usb/gadget/gadget_chips.h b/trunk/drivers/usb/gadget/gadget_chips.h index 17d9905101b7..5246e8fef2b2 100644 --- a/trunk/drivers/usb/gadget/gadget_chips.h +++ b/trunk/drivers/usb/gadget/gadget_chips.h @@ -11,10 +11,6 @@ * Some are available on 2.4 kernels; several are available, but not * yet pushed in the 2.6 mainline tree. */ - -#ifndef __GADGET_CHIPS_H -#define __GADGET_CHIPS_H - #ifdef CONFIG_USB_GADGET_NET2280 #define gadget_is_net2280(g) !strcmp("net2280", (g)->name) #else @@ -241,5 +237,3 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget) /* Everything else is *presumably* fine ... */ return true; } - -#endif /* __GADGET_CHIPS_H */ diff --git a/trunk/drivers/usb/gadget/omap_udc.c b/trunk/drivers/usb/gadget/omap_udc.c index 574c53831a05..376e80c07530 100644 --- a/trunk/drivers/usb/gadget/omap_udc.c +++ b/trunk/drivers/usb/gadget/omap_udc.c @@ -54,7 +54,6 @@ #include #include -#include #include "omap_udc.h" @@ -2311,10 +2310,10 @@ static int proc_otg_show(struct seq_file *s) u32 trans; char *ctrl_name; - tmp = omap_readl(OTG_REV); + tmp = OTG_REV_REG; if (cpu_is_omap24xx()) { ctrl_name = "control_devconf"; - trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0); + trans = CONTROL_DEVCONF_REG; } else { ctrl_name = "tranceiver_ctrl"; trans = omap_readw(USB_TRANSCEIVER_CTRL); diff --git a/trunk/drivers/usb/gadget/u_serial.c b/trunk/drivers/usb/gadget/u_serial.c index 53d59287f2bc..abf9505d3a75 100644 --- a/trunk/drivers/usb/gadget/u_serial.c +++ b/trunk/drivers/usb/gadget/u_serial.c @@ -52,16 +52,13 @@ * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. */ -#define PREFIX "ttyGS" - /* * gserial is the lifecycle interface, used by USB functions * gs_port is the I/O nexus, used by the tty driver * tty_struct links to the tty/filesystem framework * * gserial <---> gs_port ... links will be null when the USB link is - * inactive; managed by gserial_{connect,disconnect}(). each gserial - * instance can wrap its own USB control protocol. + * inactive; managed by gserial_{connect,disconnect}(). * gserial->ioport == usb_ep->driver_data ... gs_port * gs_port->port_usb ... gserial * @@ -103,8 +100,6 @@ struct gs_port { wait_queue_head_t close_wait; /* wait for last close */ struct list_head read_pool; - struct list_head read_queue; - unsigned n_read; struct tasklet_struct push; struct list_head write_pool; @@ -182,7 +177,7 @@ static void gs_buf_clear(struct gs_buf *gb) /* * gs_buf_data_avail * - * Return the number of bytes of data written into the circular + * Return the number of bytes of data available in the circular * buffer. */ static unsigned gs_buf_data_avail(struct gs_buf *gb) @@ -283,7 +278,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count) * Allocate a usb_request and its buffer. Returns a pointer to the * usb_request or NULL if there is an error. */ -struct usb_request * +static struct usb_request * gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) { struct usb_request *req; @@ -307,7 +302,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) * * Free a usb_request and its buffer. 
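For reference, the gs_buf_* helpers patched above use the standard head/tail circular-buffer arithmetic in which one byte is always kept free to distinguish "full" from "empty". A minimal sketch of that accounting (hypothetical names, not code from this driver):

    struct circ_sketch {
        char     *buf;
        unsigned size;   /* capacity in bytes; one byte always stays free */
        unsigned head;   /* next position to write */
        unsigned tail;   /* next position to read */
    };

    /* bytes of data currently stored and readable */
    static unsigned circ_data_avail(const struct circ_sketch *b)
    {
        return (b->size + b->head - b->tail) % b->size;
    }

    /* bytes that may still be written without clobbering unread data */
    static unsigned circ_space_avail(const struct circ_sketch *b)
    {
        return (b->size + b->tail - b->head - 1) % b->size;
    }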
*/ -void gs_free_req(struct usb_ep *ep, struct usb_request *req) +static void gs_free_req(struct usb_ep *ep, struct usb_request *req) { kfree(req->buf); usb_ep_free_request(ep, req); @@ -372,9 +367,11 @@ __acquires(&port->port_lock) req->length = len; list_del(&req->list); - pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", - port->port_num, len, *((u8 *)req->buf), +#ifdef VERBOSE_DEBUG + pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n", + __func__, in->name, len, *((u8 *)req->buf), *((u8 *)req->buf+1), *((u8 *)req->buf+2)); +#endif /* Drop lock while we call out of driver; completions * could be issued while we do so. Disconnection may @@ -404,6 +401,56 @@ __acquires(&port->port_lock) return status; } +static void gs_rx_push(unsigned long _port) +{ + struct gs_port *port = (void *)_port; + struct tty_struct *tty = port->port_tty; + + /* With low_latency, tty_flip_buffer_push() doesn't put its + * real work through a workqueue, so the ldisc has a better + * chance to keep up with peak USB data rates. + */ + if (tty) { + tty_flip_buffer_push(tty); + wake_up_interruptible(&tty->read_wait); + } +} + +/* + * gs_recv_packet + * + * Called for each USB packet received. Reads the packet + * header and stuffs the data in the appropriate tty buffer. + * Returns 0 if successful, or a negative error number. + * + * Called during USB completion routine, on interrupt time. + * With port_lock. + */ +static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size) +{ + unsigned len; + struct tty_struct *tty; + + /* I/O completions can continue for a while after close(), until the + * request queue empties. Just discard any data we receive, until + * something reopens this TTY ... as if there were no HW flow control. + */ + tty = port->port_tty; + if (tty == NULL) { + pr_vdebug("%s: ttyGS%d, after close\n", + __func__, port->port_num); + return -EIO; + } + + len = tty_insert_flip_string(tty, packet, size); + if (len > 0) + tasklet_schedule(&port->push); + if (len < size) + pr_debug("%s: ttyGS%d, drop %d bytes\n", + __func__, port->port_num, size - len); + return 0; +} + /* * Context: caller owns port_lock, and port_usb is set */ @@ -422,9 +469,9 @@ __acquires(&port->port_lock) int status; struct tty_struct *tty; - /* no more rx if closed */ + /* no more rx if closed or throttled */ tty = port->port_tty; - if (!tty) + if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) break; req = list_entry(pool->next, struct usb_request, list); @@ -453,134 +500,36 @@ __acquires(&port->port_lock) return started; } -/* - * RX tasklet takes data out of the RX queue and hands it up to the TTY - * layer until it refuses to take any more data (or is throttled back). - * Then it issues reads for any further data. - * - * If the RX queue becomes full enough that no usb_request is queued, - * the OUT endpoint may begin NAKing as soon as its FIFO fills up. - * So QUEUE_SIZE packets plus however many the FIFO holds (usually two) - * can be buffered before the TTY layer's buffers (currently 64 KB). 
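The gs_recv_packet()/gs_rx_push() pair added back above splits RX into two halves: the URB completion copies bytes into the tty flip buffer while holding the port lock, and a tasklet does nothing but the potentially heavier tty_flip_buffer_push() plus a reader wakeup. Reduced to a skeleton, and assuming this era's tty flip API and the driver's gs_port fields, the shape is roughly:

    /* completion side: stash the data, recycle the request, defer the push */
    static void sketch_read_complete(struct usb_ep *ep, struct usb_request *req)
    {
        struct gs_port *port = ep->driver_data;

        spin_lock(&port->port_lock);
        if (req->status == 0 && port->port_tty) {
            tty_insert_flip_string(port->port_tty, req->buf, req->actual);
            tasklet_schedule(&port->push);
        }
        list_add(&req->list, &port->read_pool);
        spin_unlock(&port->port_lock);
    }

    /* tasklet side: only push the flip buffer up to the line discipline */
    static void sketch_rx_push(unsigned long _port)
    {
        struct gs_port *port = (void *)_port;

        if (port->port_tty) {
            tty_flip_buffer_push(port->port_tty);
            wake_up_interruptible(&port->port_tty->read_wait);
        }
    }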
- */ -static void gs_rx_push(unsigned long _port) +static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) { - struct gs_port *port = (void *)_port; - struct tty_struct *tty; - struct list_head *queue = &port->read_queue; - bool disconnect = false; - bool do_push = false; - - /* hand any queued data to the tty */ - spin_lock_irq(&port->port_lock); - tty = port->port_tty; - while (!list_empty(queue)) { - struct usb_request *req; - - req = list_first_entry(queue, struct usb_request, list); - - /* discard data if tty was closed */ - if (!tty) - goto recycle; - - /* leave data queued if tty was rx throttled */ - if (test_bit(TTY_THROTTLED, &tty->flags)) - break; - - switch (req->status) { - case -ESHUTDOWN: - disconnect = true; - pr_vdebug(PREFIX "%d: shutdown\n", port->port_num); - break; - - default: - /* presumably a transient fault */ - pr_warning(PREFIX "%d: unexpected RX status %d\n", - port->port_num, req->status); - /* FALLTHROUGH */ - case 0: - /* normal completion */ - break; - } - - /* push data to (open) tty */ - if (req->actual) { - char *packet = req->buf; - unsigned size = req->actual; - unsigned n; - int count; - - /* we may have pushed part of this packet already... */ - n = port->n_read; - if (n) { - packet += n; - size -= n; - } - - count = tty_insert_flip_string(tty, packet, size); - if (count) - do_push = true; - if (count != size) { - /* stop pushing; TTY layer can't handle more */ - port->n_read += count; - pr_vdebug(PREFIX "%d: rx block %d/%d\n", - port->port_num, - count, req->actual); - break; - } - port->n_read = 0; - } -recycle: - list_move(&req->list, &port->read_pool); - } - - /* Push from tty to ldisc; this is immediate with low_latency, and - * may trigger callbacks to this driver ... so drop the spinlock. - */ - if (tty && do_push) { - spin_unlock_irq(&port->port_lock); - tty_flip_buffer_push(tty); - wake_up_interruptible(&tty->read_wait); - spin_lock_irq(&port->port_lock); - - /* tty may have been closed */ - tty = port->port_tty; - } - + int status; + struct gs_port *port = ep->driver_data; - /* We want our data queue to become empty ASAP, keeping data - * in the tty and ldisc (not here). If we couldn't push any - * this time around, there may be trouble unless there's an - * implicit tty_unthrottle() call on its way... - * - * REVISIT we should probably add a timer to keep the tasklet - * from starving ... but it's not clear that case ever happens. - */ - if (!list_empty(queue) && tty) { - if (!test_bit(TTY_THROTTLED, &tty->flags)) { - if (do_push) - tasklet_schedule(&port->push); - else - pr_warning(PREFIX "%d: RX not scheduled?\n", - port->port_num); - } - } + spin_lock(&port->port_lock); + list_add(&req->list, &port->read_pool); - /* If we're still connected, refill the USB RX queue. */ - if (!disconnect && port->port_usb) + switch (req->status) { + case 0: + /* normal completion */ + status = gs_recv_packet(port, req->buf, req->actual); + if (status && status != -EIO) + pr_debug("%s: %s %s err %d\n", + __func__, "recv", ep->name, status); gs_start_rx(port); + break; - spin_unlock_irq(&port->port_lock); -} - -static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) -{ - struct gs_port *port = ep->driver_data; + case -ESHUTDOWN: + /* disconnect */ + pr_vdebug("%s: %s shutdown\n", __func__, ep->name); + break; - /* Queue all received data until the tty layer is ready for it. 
*/ - spin_lock(&port->port_lock); - list_add_tail(&req->list, &port->read_queue); - tasklet_schedule(&port->push); + default: + /* presumably a transient fault */ + pr_warning("%s: unexpected %s status %d\n", + __func__, ep->name, req->status); + gs_start_rx(port); + break; + } spin_unlock(&port->port_lock); } @@ -676,7 +625,6 @@ static int gs_start_io(struct gs_port *port) } /* queue read requests */ - port->n_read = 0; started = gs_start_rx(port); /* unblock any pending writes into our circular buffer */ @@ -685,10 +633,9 @@ static int gs_start_io(struct gs_port *port) } else { gs_free_requests(ep, head); gs_free_requests(port->port_usb->in, &port->write_pool); - status = -EIO; } - return status; + return started ? 0 : status; } /*-------------------------------------------------------------------------*/ @@ -789,13 +736,10 @@ static int gs_open(struct tty_struct *tty, struct file *file) /* if connected, start the I/O stream */ if (port->port_usb) { - struct gserial *gser = port->port_usb; - pr_debug("gs_open: start ttyGS%d\n", port->port_num); gs_start_io(port); - if (gser->connect) - gser->connect(gser); + /* REVISIT for ACM, issue "network connected" event */ } pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); @@ -822,7 +766,6 @@ static int gs_writes_finished(struct gs_port *p) static void gs_close(struct tty_struct *tty, struct file *file) { struct gs_port *port = tty->driver_data; - struct gserial *gser; spin_lock_irq(&port->port_lock); @@ -842,31 +785,32 @@ static void gs_close(struct tty_struct *tty, struct file *file) port->openclose = true; port->open_count = 0; - gser = port->port_usb; - if (gser && gser->disconnect) - gser->disconnect(gser); + if (port->port_usb) + /* REVISIT for ACM, issue "network disconnected" event */; /* wait for circular write buffer to drain, disconnect, or at * most GS_CLOSE_TIMEOUT seconds; then discard the rest */ - if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) { + if (gs_buf_data_avail(&port->port_write_buf) > 0 + && port->port_usb) { spin_unlock_irq(&port->port_lock); wait_event_interruptible_timeout(port->drain_wait, gs_writes_finished(port), GS_CLOSE_TIMEOUT * HZ); spin_lock_irq(&port->port_lock); - gser = port->port_usb; } /* Iff we're disconnected, there can be no I/O in flight so it's * ok to free the circular buffer; else just scrub it. And don't * let the push tasklet fire again until we're re-opened. */ - if (gser == NULL) + if (port->port_usb == NULL) gs_buf_free(&port->port_write_buf); else gs_buf_clear(&port->port_write_buf); + tasklet_kill(&port->push); + tty->driver_data = NULL; port->port_tty = NULL; @@ -967,35 +911,15 @@ static void gs_unthrottle(struct tty_struct *tty) { struct gs_port *port = tty->driver_data; unsigned long flags; + unsigned started = 0; spin_lock_irqsave(&port->port_lock, flags); - if (port->port_usb) { - /* Kickstart read queue processing. We don't do xon/xoff, - * rts/cts, or other handshaking with the host, but if the - * read queue backs up enough we'll be NAKing OUT packets. 
- */ - tasklet_schedule(&port->push); - pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num); - } + if (port->port_usb) + started = gs_start_rx(port); spin_unlock_irqrestore(&port->port_lock, flags); -} - -static int gs_break_ctl(struct tty_struct *tty, int duration) -{ - struct gs_port *port = tty->driver_data; - int status = 0; - struct gserial *gser; - - pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n", - port->port_num, duration); - spin_lock_irq(&port->port_lock); - gser = port->port_usb; - if (gser && gser->send_break) - status = gser->send_break(gser, duration); - spin_unlock_irq(&port->port_lock); - - return status; + pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n", + port->port_num, started); } static const struct tty_operations gs_tty_ops = { @@ -1007,7 +931,6 @@ static const struct tty_operations gs_tty_ops = { .write_room = gs_write_room, .chars_in_buffer = gs_chars_in_buffer, .unthrottle = gs_unthrottle, - .break_ctl = gs_break_ctl, }; /*-------------------------------------------------------------------------*/ @@ -1030,7 +953,6 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) tasklet_init(&port->push, gs_rx_push, (unsigned long) port); INIT_LIST_HEAD(&port->read_pool); - INIT_LIST_HEAD(&port->read_queue); INIT_LIST_HEAD(&port->write_pool); port->port_num = port_num; @@ -1075,7 +997,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count) gs_tty_driver->owner = THIS_MODULE; gs_tty_driver->driver_name = "g_serial"; - gs_tty_driver->name = PREFIX; + gs_tty_driver->name = "ttyGS"; /* uses dynamically assigned dev_t values */ gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; @@ -1182,8 +1104,6 @@ void gserial_cleanup(void) ports[i].port = NULL; mutex_unlock(&ports[i].lock); - tasklet_kill(&port->push); - /* wait for old opens to finish */ wait_event(port->close_wait, gs_closed(port)); @@ -1255,17 +1175,14 @@ int gserial_connect(struct gserial *gser, u8 port_num) /* REVISIT if waiting on "carrier detect", signal. */ - /* if it's already open, start I/O ... and notify the serial - * protocol about open/close status (connect/disconnect). + /* REVISIT for ACM, issue "network connection" status notification: + * connected if open_count, else disconnected. */ + + /* if it's already open, start I/O */ if (port->open_count) { pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); gs_start_io(port); - if (gser->connect) - gser->connect(gser); - } else { - if (gser->disconnect) - gser->disconnect(gser); } spin_unlock_irqrestore(&port->port_lock, flags); @@ -1324,7 +1241,6 @@ void gserial_disconnect(struct gserial *gser) if (port->open_count == 0 && !port->openclose) gs_buf_free(&port->port_write_buf); gs_free_requests(gser->out, &port->read_pool); - gs_free_requests(gser->out, &port->read_queue); gs_free_requests(gser->in, &port->write_pool); spin_unlock_irqrestore(&port->port_lock, flags); } diff --git a/trunk/drivers/usb/gadget/u_serial.h b/trunk/drivers/usb/gadget/u_serial.h index af3910d01aea..7b561138f90e 100644 --- a/trunk/drivers/usb/gadget/u_serial.h +++ b/trunk/drivers/usb/gadget/u_serial.h @@ -23,7 +23,8 @@ * style I/O using the USB peripheral endpoints listed here, including * hookups to sysfs and /dev for each logical "tty" device. * - * REVISIT at least ACM could support tiocmget() if needed. + * REVISIT need TTY --> USB event flow too, so ACM can report open/close + * as carrier detect events. Model after ECM. There's more ACM state too. * * REVISIT someday, allow multiplexing several TTYs over these endpoints. 
*/ @@ -40,17 +41,8 @@ struct gserial { /* REVISIT avoid this CDC-ACM support harder ... */ struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ - - /* notification callbacks */ - void (*connect)(struct gserial *p); - void (*disconnect)(struct gserial *p); - int (*send_break)(struct gserial *p, int duration); }; -/* utilities to allocate/free request and buffer */ -struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags); -void gs_free_req(struct usb_ep *, struct usb_request *req); - /* port setup/teardown is handled by gadget driver */ int gserial_setup(struct usb_gadget *g, unsigned n_ports); void gserial_cleanup(void); diff --git a/trunk/drivers/usb/host/isp1760-hcd.c b/trunk/drivers/usb/host/isp1760-hcd.c index d22a84f86a33..c858f2adb929 100644 --- a/trunk/drivers/usb/host/isp1760-hcd.c +++ b/trunk/drivers/usb/host/isp1760-hcd.c @@ -126,8 +126,9 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs) * doesn't quite work because some people have to enforce 32-bit access */ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, - __u32 __iomem *dst, u32 len) + __u32 __iomem *dst, u32 offset, u32 len) { + struct usb_hcd *hcd = priv_to_hcd(priv); u32 val; u8 *buff8; @@ -135,6 +136,11 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len); return; } + isp1760_writel(offset, hcd->regs + HC_MEMORY_REG); + /* XXX + * 90nsec delay, the spec says something how this could be avoided. + */ + mdelay(1); while (len >= 4) { *src = __raw_readl(dst); @@ -981,20 +987,8 @@ static void do_atl_int(struct usb_hcd *usb_hcd) printk(KERN_ERR "qh is 0\n"); continue; } - isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs + - HC_MEMORY_REG); - isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + - HC_MEMORY_REG); - /* - * write bank1 address twice to ensure the 90ns delay (time - * between BANK0 write and the priv_read_copy() call is at - * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns) - */ - isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + - HC_MEMORY_REG); - - priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs + - ISP_BANK(0), sizeof(ptd)); + priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, + atl_regs, sizeof(ptd)); dw1 = le32_to_cpu(ptd.dw1); dw2 = le32_to_cpu(ptd.dw2); @@ -1097,7 +1091,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) case IN_PID: priv_read_copy(priv, priv->atl_ints[queue_entry].data_buffer, - usb_hcd->regs + payload + ISP_BANK(1), + usb_hcd->regs + payload, payload, length); case OUT_PID: @@ -1128,11 +1122,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) { /* short BULK received */ + printk(KERN_ERR "short bulk, %d instead %zu\n", length, + qtd->length); if (urb->transfer_flags & URB_SHORT_NOT_OK) { urb->status = -EREMOTEIO; - isp1760_dbg(priv, "short bulk, %d instead %zu " - "with URB_SHORT_NOT_OK flag.\n", - length, qtd->length); + printk(KERN_ERR "not okey\n"); } if (urb->status == -EINPROGRESS) @@ -1212,20 +1206,8 @@ static void do_intl_int(struct usb_hcd *usb_hcd) continue; } - isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs + - HC_MEMORY_REG); - isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + - HC_MEMORY_REG); - /* - * write bank1 address twice to ensure the 90ns delay (time - * between BANK0 write and the priv_read_copy() call is at - * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns) - */ - isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + 
- HC_MEMORY_REG); - - priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs + - ISP_BANK(0), sizeof(ptd)); + priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs, + int_regs, sizeof(ptd)); dw1 = le32_to_cpu(ptd.dw1); dw3 = le32_to_cpu(ptd.dw3); check_int_err_status(le32_to_cpu(ptd.dw4)); @@ -1260,7 +1242,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd) case IN_PID: priv_read_copy(priv, priv->int_ints[queue_entry].data_buffer, - usb_hcd->regs + payload + ISP_BANK(1), + usb_hcd->regs + payload , payload, length); case OUT_PID: @@ -1633,7 +1615,8 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return -EPIPE; } - return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); + isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); + return 0; } static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, diff --git a/trunk/drivers/usb/host/isp1760-hcd.h b/trunk/drivers/usb/host/isp1760-hcd.h index 4377277667d9..6473dd86993c 100644 --- a/trunk/drivers/usb/host/isp1760-hcd.h +++ b/trunk/drivers/usb/host/isp1760-hcd.h @@ -54,8 +54,6 @@ void deinit_kmem_cache(void); #define BUFFER_MAP 0x7 #define HC_MEMORY_REG 0x33c -#define ISP_BANK(x) ((x) << 16) - #define HC_PORT1_CTRL 0x374 #define PORT1_POWER (3 << 3) #define PORT1_INIT1 (1 << 7) @@ -121,9 +119,6 @@ struct inter_packet_info { typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, struct isp1760_qtd *qtd); -#define isp1760_dbg(priv, fmt, args...) \ - dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args) - #define isp1760_info(priv, fmt, args...) \ dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args) diff --git a/trunk/drivers/usb/host/ohci-hcd.c b/trunk/drivers/usb/host/ohci-hcd.c index 89901962cbfd..26bc47941d01 100644 --- a/trunk/drivers/usb/host/ohci-hcd.c +++ b/trunk/drivers/usb/host/ohci-hcd.c @@ -86,21 +86,6 @@ static void ohci_stop (struct usb_hcd *hcd); static int ohci_restart (struct ohci_hcd *ohci); #endif -#ifdef CONFIG_PCI -static void quirk_amd_pll(int state); -static void amd_iso_dev_put(void); -#else -static inline void quirk_amd_pll(int state) -{ - return; -} -static inline void amd_iso_dev_put(void) -{ - return; -} -#endif - - #include "ohci-hub.c" #include "ohci-dbg.c" #include "ohci-mem.c" @@ -498,9 +483,6 @@ static int ohci_init (struct ohci_hcd *ohci) int ret; struct usb_hcd *hcd = ohci_to_hcd(ohci); - if (distrust_firmware) - ohci->flags |= OHCI_QUIRK_HUB_POWER; - disable (ohci); ohci->regs = hcd->regs; @@ -707,8 +689,7 @@ static int ohci_run (struct ohci_hcd *ohci) temp |= RH_A_NOCP; temp &= ~(RH_A_POTPGT | RH_A_NPS); ohci_writel (ohci, temp, &ohci->regs->roothub.a); - } else if ((ohci->flags & OHCI_QUIRK_AMD756) || - (ohci->flags & OHCI_QUIRK_HUB_POWER)) { + } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) { /* hub power always on; required for AMD-756 and some * Mac platforms. ganged overcurrent reporting, if any. */ @@ -901,8 +882,6 @@ static void ohci_stop (struct usb_hcd *hcd) if (quirk_zfmicro(ohci)) del_timer(&ohci->unlink_watchdog); - if (quirk_amdiso(ohci)) - amd_iso_dev_put(); remove_debug_files (ohci); ohci_mem_cleanup (ohci); diff --git a/trunk/drivers/usb/host/ohci-hub.c b/trunk/drivers/usb/host/ohci-hub.c index 439beb784f3e..b56739221d11 100644 --- a/trunk/drivers/usb/host/ohci-hub.c +++ b/trunk/drivers/usb/host/ohci-hub.c @@ -483,13 +483,6 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf) length++; } - /* Some broken controllers never turn off RHCS in the interrupt - * status register. 
For their sake we won't re-enable RHSC - * interrupts if the flag is already set. - */ - if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC) - changed = 1; - /* look at each port */ for (i = 0; i < ohci->num_ports; i++) { u32 status = roothub_portstatus (ohci, i); @@ -579,6 +572,8 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port) return 0; } +static void start_hnp(struct ohci_hcd *ohci); + #else #define ohci_start_port_reset NULL @@ -765,7 +760,7 @@ static int ohci_hub_control ( #ifdef CONFIG_USB_OTG if (hcd->self.otg_port == (wIndex + 1) && hcd->self.b_hnp_enable) - ohci->start_hnp(ohci); + start_hnp(ohci); else #endif ohci_writel (ohci, RH_PS_PSS, diff --git a/trunk/drivers/usb/host/ohci-omap.c b/trunk/drivers/usb/host/ohci-omap.c index 3d532b709670..94dfca02f7e1 100644 --- a/trunk/drivers/usb/host/ohci-omap.c +++ b/trunk/drivers/usb/host/ohci-omap.c @@ -225,7 +225,6 @@ static int ohci_omap_init(struct usb_hcd *hcd) dev_err(hcd->self.controller, "can't find transceiver\n"); return -ENODEV; } - ohci->start_hnp = start_hnp; } #endif @@ -261,7 +260,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) omap_cfg_reg(W4_USB_HIGHZ); } ohci_writel(ohci, rh, &ohci->regs->roothub.a); - ohci->flags &= ~OHCI_QUIRK_HUB_POWER; + distrust_firmware = 0; } else if (machine_is_nokia770()) { /* We require a self-powered hub, which should have * plenty of power. */ diff --git a/trunk/drivers/usb/host/ohci-pci.c b/trunk/drivers/usb/host/ohci-pci.c index 083e8df0a817..4696cc912e16 100644 --- a/trunk/drivers/usb/host/ohci-pci.c +++ b/trunk/drivers/usb/host/ohci-pci.c @@ -18,28 +18,6 @@ #error "This file is PCI bus glue. CONFIG_PCI must be defined." #endif -#include -#include - - -/* constants used to work around PM-related transfer - * glitches in some AMD 700 series southbridges - */ -#define AB_REG_BAR 0xf0 -#define AB_INDX(addr) ((addr) + 0x00) -#define AB_DATA(addr) ((addr) + 0x04) -#define AX_INDXC 0X30 -#define AX_DATAC 0x34 - -#define NB_PCIE_INDX_ADDR 0xe0 -#define NB_PCIE_INDX_DATA 0xe4 -#define PCIE_P_CNTL 0x10040 -#define BIF_NB 0x10002 - -static struct pci_dev *amd_smbus_dev; -static struct pci_dev *amd_hb_dev; -static int amd_ohci_iso_count; - /*-------------------------------------------------------------------------*/ static int broken_suspend(struct usb_hcd *hcd) @@ -165,103 +143,6 @@ static int ohci_quirk_nec(struct usb_hcd *hcd) return 0; } -static int ohci_quirk_amd700(struct usb_hcd *hcd) -{ - struct ohci_hcd *ohci = hcd_to_ohci(hcd); - u8 rev = 0; - - if (!amd_smbus_dev) - amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, - PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); - if (!amd_smbus_dev) - return 0; - - pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); - if ((rev > 0x3b) || (rev < 0x30)) { - pci_dev_put(amd_smbus_dev); - amd_smbus_dev = NULL; - return 0; - } - - amd_ohci_iso_count++; - - if (!amd_hb_dev) - amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL); - - ohci->flags |= OHCI_QUIRK_AMD_ISO; - ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n"); - - return 0; -} - -/* - * The hardware normally enables the A-link power management feature, which - * lets the system lower the power consumption in idle states. - * - * Assume the system is configured to have USB 1.1 ISO transfers going - * to or from a USB device. Without this quirk, that stream may stutter - * or have breaks occasionally. For transfers going to speakers, this - * makes a very audible mess... 
- * - * That audio playback corruption is due to the audio stream getting - * interrupted occasionally when the link goes in lower power state - * This USB quirk prevents the link going into that lower power state - * during audio playback or other ISO operations. - */ -static void quirk_amd_pll(int on) -{ - u32 addr; - u32 val; - u32 bit = (on > 0) ? 1 : 0; - - pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr); - - /* BIT names/meanings are NDA-protected, sorry ... */ - - outl(AX_INDXC, AB_INDX(addr)); - outl(0x40, AB_DATA(addr)); - outl(AX_DATAC, AB_INDX(addr)); - val = inl(AB_DATA(addr)); - val &= ~((1 << 3) | (1 << 4) | (1 << 9)); - val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9); - outl(val, AB_DATA(addr)); - - if (amd_hb_dev) { - addr = PCIE_P_CNTL; - pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr); - - pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val); - val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12)); - val |= bit | (bit << 3) | (bit << 12); - val |= ((!bit) << 4) | ((!bit) << 9); - pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val); - - addr = BIF_NB; - pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr); - - pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val); - val &= ~(1 << 8); - val |= bit << 8; - pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val); - } -} - -static void amd_iso_dev_put(void) -{ - amd_ohci_iso_count--; - if (amd_ohci_iso_count == 0) { - if (amd_smbus_dev) { - pci_dev_put(amd_smbus_dev); - amd_smbus_dev = NULL; - } - if (amd_hb_dev) { - pci_dev_put(amd_hb_dev); - amd_hb_dev = NULL; - } - } - -} - /* List of quirks for OHCI */ static const struct pci_device_id ohci_pci_quirks[] = { { @@ -300,19 +181,6 @@ static const struct pci_device_id ohci_pci_quirks[] = { PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152), .driver_data = (unsigned long) broken_suspend, }, - { - PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397), - .driver_data = (unsigned long)ohci_quirk_amd700, - }, - { - PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398), - .driver_data = (unsigned long)ohci_quirk_amd700, - }, - { - PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), - .driver_data = (unsigned long)ohci_quirk_amd700, - }, - /* FIXME for some of the early AMD 760 southbridges, OHCI * won't work at all. blacklist them. 
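The quirk_amd_pll() body deleted above is an instance of the common indexed-register idiom: one I/O port selects an internal register, a paired port carries its data, and updates are read-modify-write. A generic sketch of that idiom (hypothetical port and register names; outl()/inl() as declared in <asm/io.h>):

    /* select an internal register via the index port, then touch the data port */
    static u32 indexed_read(unsigned long index_port, unsigned long data_port,
                u32 reg)
    {
        outl(reg, index_port);      /* select the internal register */
        return inl(data_port);      /* then read it through the data port */
    }

    static void indexed_rmw(unsigned long index_port, unsigned long data_port,
                u32 reg, u32 clear, u32 set)
    {
        u32 val = indexed_read(index_port, data_port, reg);

        val &= ~clear;
        val |= set;
        outl(reg, index_port);
        outl(val, data_port);
    }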
*/ diff --git a/trunk/drivers/usb/host/ohci-q.c b/trunk/drivers/usb/host/ohci-q.c index c2d80f80448b..6a9b4c557953 100644 --- a/trunk/drivers/usb/host/ohci-q.c +++ b/trunk/drivers/usb/host/ohci-q.c @@ -49,9 +49,6 @@ __acquires(ohci->lock) switch (usb_pipetype (urb->pipe)) { case PIPE_ISOCHRONOUS: ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; - if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 - && quirk_amdiso(ohci)) - quirk_amd_pll(1); break; case PIPE_INTERRUPT: ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; @@ -680,9 +677,6 @@ static void td_submit_urb ( data + urb->iso_frame_desc [cnt].offset, urb->iso_frame_desc [cnt].length, urb, cnt); } - if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 - && quirk_amdiso(ohci)) - quirk_amd_pll(0); periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; break; diff --git a/trunk/drivers/usb/host/ohci.h b/trunk/drivers/usb/host/ohci.h index faf622eafce7..dc544ddc7849 100644 --- a/trunk/drivers/usb/host/ohci.h +++ b/trunk/drivers/usb/host/ohci.h @@ -371,7 +371,6 @@ struct ohci_hcd { * other external transceivers should be software-transparent */ struct otg_transceiver *transceiver; - void (*start_hnp)(struct ohci_hcd *ohci); /* * memory management for queue data structures @@ -400,8 +399,6 @@ struct ohci_hcd { #define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ #define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ #define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ -#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ -#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/ // there are also chip quirks/bugs in init logic struct work_struct nec_work; /* Worker for NEC quirk */ @@ -429,10 +426,6 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci) { return ohci->flags & OHCI_QUIRK_ZFMICRO; } -static inline int quirk_amdiso(struct ohci_hcd *ohci) -{ - return ohci->flags & OHCI_QUIRK_AMD_ISO; -} #else static inline int quirk_nec(struct ohci_hcd *ohci) { @@ -442,10 +435,6 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci) { return 0; } -static inline int quirk_amdiso(struct ohci_hcd *ohci) -{ - return 0; -} #endif /* convert between an hcd pointer and the corresponding ohci_hcd */ diff --git a/trunk/drivers/usb/host/r8a66597-hcd.c b/trunk/drivers/usb/host/r8a66597-hcd.c index ea7126f99cab..d5f02dddb120 100644 --- a/trunk/drivers/usb/host/r8a66597-hcd.c +++ b/trunk/drivers/usb/host/r8a66597-hcd.c @@ -964,34 +964,11 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum) disable_irq_nrdy(r8a66597, pipenum); } -static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597) -{ - mod_timer(&r8a66597->rh_timer, - jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME)); -} - -static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, - int connect) -{ - struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; - - rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; - rh->scount = R8A66597_MAX_SAMPLING; - if (connect) - rh->port |= 1 << USB_PORT_FEAT_CONNECTION; - else - rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION); - rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION; - - r8a66597_root_hub_start_polling(r8a66597); -} - /* this function must be called with interrupt disabled */ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, u16 syssts) { if (syssts == SE0) { - r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); return; } @@ 
-1025,10 +1002,13 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port) { struct r8a66597_device *dev = r8a66597->root_hub[port].dev; + r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION); + r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION); + disable_r8a66597_pipe_all(r8a66597, dev); free_usb_address(r8a66597, dev); - start_root_hub_sampling(r8a66597, port, 0); + r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); } /* this function must be called with interrupt disabled */ @@ -1571,6 +1551,23 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597) } } +static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597) +{ + mod_timer(&r8a66597->rh_timer, + jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME)); +} + +static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port) +{ + struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; + + rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; + rh->scount = R8A66597_MAX_SAMPLING; + r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION) + | (1 << USB_PORT_FEAT_C_CONNECTION); + r8a66597_root_hub_start_polling(r8a66597); +} + static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) { struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); @@ -1597,7 +1594,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) r8a66597_bclr(r8a66597, ATTCHE, INTENB2); /* start usb bus sampling */ - start_root_hub_sampling(r8a66597, 1, 1); + start_root_hub_sampling(r8a66597, 1); } if (mask2 & DTCH) { r8a66597_write(r8a66597, ~DTCH, INTSTS2); @@ -1612,7 +1609,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) r8a66597_bclr(r8a66597, ATTCHE, INTENB1); /* start usb bus sampling */ - start_root_hub_sampling(r8a66597, 0, 1); + start_root_hub_sampling(r8a66597, 0); } if (mask1 & DTCH) { r8a66597_write(r8a66597, ~DTCH, INTSTS1); diff --git a/trunk/drivers/usb/misc/Kconfig b/trunk/drivers/usb/misc/Kconfig index 4ea50e0abcbb..001789c9a11a 100644 --- a/trunk/drivers/usb/misc/Kconfig +++ b/trunk/drivers/usb/misc/Kconfig @@ -42,6 +42,16 @@ config USB_ADUTUX To compile this driver as a module, choose M here. The module will be called adutux. +config USB_AUERSWALD + tristate "USB Auerswald ISDN support" + depends on USB + help + Say Y here if you want to connect an Auerswald USB ISDN Device + to your computer's USB port. + + To compile this driver as a module, choose M here: the + module will be called auerswald. + config USB_RIO500 tristate "USB Diamond Rio500 support" depends on USB diff --git a/trunk/drivers/usb/misc/Makefile b/trunk/drivers/usb/misc/Makefile index 45b4e12afb08..aba091cb5ec0 100644 --- a/trunk/drivers/usb/misc/Makefile +++ b/trunk/drivers/usb/misc/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_USB_ADUTUX) += adutux.o obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o +obj-$(CONFIG_USB_AUERSWALD) += auerswald.o obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o obj-$(CONFIG_USB_CYTHERM) += cytherm.o diff --git a/trunk/drivers/usb/misc/auerswald.c b/trunk/drivers/usb/misc/auerswald.c new file mode 100644 index 000000000000..d2f61d5510e7 --- /dev/null +++ b/trunk/drivers/usb/misc/auerswald.c @@ -0,0 +1,2152 @@ +/*****************************************************************************/ +/* + * auerswald.c -- Auerswald PBX/System Telephone usb driver. 
+ * + * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de) + * + * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl) + * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + /*****************************************************************************/ + +/* Standard Linux module include files */ +#include +#include +#include +#include +#include +#include +#include +#include + +/*-------------------------------------------------------------------*/ +/* Debug support */ +#ifdef DEBUG +#define dump( adr, len) \ +do { \ + unsigned int u; \ + printk (KERN_DEBUG); \ + for (u = 0; u < len; u++) \ + printk (" %02X", adr[u] & 0xFF); \ + printk ("\n"); \ +} while (0) +#else +#define dump( adr, len) +#endif + +/*-------------------------------------------------------------------*/ +/* Version Information */ +#define DRIVER_VERSION "0.9.11" +#define DRIVER_AUTHOR "Wolfgang Mües " +#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver" + +/*-------------------------------------------------------------------*/ +/* Private declarations for Auerswald USB driver */ + +/* Auerswald Vendor ID */ +#define ID_AUERSWALD 0x09BF + +#define AUER_MINOR_BASE 112 /* auerswald driver minor number */ + +/* we can have up to this number of device plugged in at once */ +#define AUER_MAX_DEVICES 16 + + +/* Number of read buffers for each device */ +#define AU_RBUFFERS 10 + +/* Number of chain elements for each control chain */ +#define AUCH_ELEMENTS 20 + +/* Number of retries in communication */ +#define AU_RETRIES 10 + +/*-------------------------------------------------------------------*/ +/* vendor specific protocol */ +/* Header Byte */ +#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */ +#define AUH_DIRECT 0x00 /* data is for USB device */ +#define AUH_INDIRECT 0x80 /* USB device is relay */ + +#define AUH_SPLITMASK 0x40 /* mask for split bit */ +#define AUH_UNSPLIT 0x00 /* data block is full-size */ +#define AUH_SPLIT 0x40 /* data block is part of a larger one, + split-byte follows */ + +#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */ +#define AUH_TYPESIZE 0x40 /* different types */ +#define AUH_DCHANNEL 0x00 /* D channel data */ +#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */ +#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */ +/* 0x03..0x0F reserved for driver internal use */ +#define AUH_COMMAND 0x10 /* Command channel */ +#define AUH_BPROT 0x11 /* Configuration block protocol */ +#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */ +#define AUH_TAPI 0x13 /* telephone api data (ATD) */ +/* 0x14..0x3F reserved for other protocols */ +#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */ +#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */ + +#define AUH_SIZE 1 
/* Size of Header Byte */ + +/* Split Byte. Only present if split bit in header byte set.*/ +#define AUS_STARTMASK 0x80 /* mask for first block of splitted frame */ +#define AUS_FIRST 0x80 /* first block */ +#define AUS_FOLLOW 0x00 /* following block */ + +#define AUS_ENDMASK 0x40 /* mask for last block of splitted frame */ +#define AUS_END 0x40 /* last block */ +#define AUS_NOEND 0x00 /* not the last block */ + +#define AUS_LENMASK 0x3F /* mask for block length information */ + +/* Request types */ +#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */ +#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */ + +/* Vendor Requests */ +#define AUV_GETINFO 0x00 /* GetDeviceInfo */ +#define AUV_WBLOCK 0x01 /* Write Block */ +#define AUV_RBLOCK 0x02 /* Read Block */ +#define AUV_CHANNELCTL 0x03 /* Channel Control */ +#define AUV_DUMMY 0x04 /* Dummy Out for retry */ + +/* Device Info Types */ +#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */ +#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */ +#define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */ + +/* Interrupt endpoint definitions */ +#define AU_IRQENDP 1 /* Endpoint number */ +#define AU_IRQCMDID 16 /* Command-block ID */ +#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */ +#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */ + +/* Device String Descriptors */ +#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */ +#define AUSI_DEVICE 2 /* Name of the Device */ +#define AUSI_SERIALNR 3 /* Serial Number */ +#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */ + +#define AUSI_DLEN 100 /* Max. Length of Device Description */ + +#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */ + +/*-------------------------------------------------------------------*/ +/* External data structures / Interface */ +typedef struct +{ + char __user *buf; /* return buffer for string contents */ + unsigned int bsize; /* size of return buffer */ +} audevinfo_t,*paudevinfo_t; + +/* IO controls */ +#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */ +#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */ +#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */ +#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. 
buffer length for the device */ +#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */ +#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */ +#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmitt channel ready to send */ +/* 'U' 0xF7..0xFF reseved */ + +/*-------------------------------------------------------------------*/ +/* Internal data structures */ + +/* ..................................................................*/ +/* urb chain element */ +struct auerchain; /* forward for circular reference */ +typedef struct +{ + struct auerchain *chain; /* pointer to the chain to which this element belongs */ + struct urb * urbp; /* pointer to attached urb */ + void *context; /* saved URB context */ + usb_complete_t complete; /* saved URB completion function */ + struct list_head list; /* to include element into a list */ +} auerchainelement_t,*pauerchainelement_t; + +/* urb chain */ +typedef struct auerchain +{ + pauerchainelement_t active; /* element which is submitted to urb */ + spinlock_t lock; /* protection agains interrupts */ + struct list_head waiting_list; /* list of waiting elements */ + struct list_head free_list; /* list of available elements */ +} auerchain_t,*pauerchain_t; + +/* urb blocking completion helper struct */ +typedef struct +{ + wait_queue_head_t wqh; /* wait for completion */ + unsigned int done; /* completion flag */ +} auerchain_chs_t,*pauerchain_chs_t; + +/* ...................................................................*/ +/* buffer element */ +struct auerbufctl; /* forward */ +typedef struct +{ + char *bufp; /* reference to allocated data buffer */ + unsigned int len; /* number of characters in data buffer */ + unsigned int retries; /* for urb retries */ + struct usb_ctrlrequest *dr; /* for setup data in control messages */ + struct urb * urbp; /* USB urb */ + struct auerbufctl *list; /* pointer to list */ + struct list_head buff_list; /* reference to next buffer in list */ +} auerbuf_t,*pauerbuf_t; + +/* buffer list control block */ +typedef struct auerbufctl +{ + spinlock_t lock; /* protection in interrupt */ + struct list_head free_buff_list;/* free buffers */ + struct list_head rec_buff_list; /* buffers with receive data */ +} auerbufctl_t,*pauerbufctl_t; + +/* ...................................................................*/ +/* service context */ +struct auerscon; /* forward */ +typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t); +typedef void (*auer_disconn_t) (struct auerscon*); +typedef struct auerscon +{ + unsigned int id; /* protocol service id AUH_xxxx */ + auer_dispatch_t dispatch; /* dispatch read buffer */ + auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */ +} auerscon_t,*pauerscon_t; + +/* ...................................................................*/ +/* USB device context */ +typedef struct +{ + struct mutex mutex; /* protection in user context */ + char name[20]; /* name of the /dev/usb entry */ + unsigned int dtindex; /* index in the device table */ + struct usb_device * usbdev; /* USB device handle */ + int open_count; /* count the number of open character channels */ + char dev_desc[AUSI_DLEN];/* for storing a textual description */ + unsigned int maxControlLength; /* max. 
Length of control paket (without header) */ + struct urb * inturbp; /* interrupt urb */ + char * intbufp; /* data buffer for interrupt urb */ + unsigned int irqsize; /* size of interrupt endpoint 1 */ + struct auerchain controlchain; /* for chaining of control messages */ + auerbufctl_t bufctl; /* Buffer control for control transfers */ + pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */ + unsigned int version; /* Version of the device */ + wait_queue_head_t bufferwait; /* wait for a control buffer */ +} auerswald_t,*pauerswald_t; + +/* ................................................................... */ +/* character device context */ +typedef struct +{ + struct mutex mutex; /* protection in user context */ + pauerswald_t auerdev; /* context pointer of assigned device */ + auerbufctl_t bufctl; /* controls the buffer chain */ + auerscon_t scontext; /* service context */ + wait_queue_head_t readwait; /* for synchronous reading */ + struct mutex readmutex; /* protection against multiple reads */ + pauerbuf_t readbuf; /* buffer held for partial reading */ + unsigned int readoffset; /* current offset in readbuf */ + unsigned int removed; /* is != 0 if device is removed */ +} auerchar_t,*pauerchar_t; + + +/*-------------------------------------------------------------------*/ +/* Forwards */ +static void auerswald_ctrlread_complete (struct urb * urb); +static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp); +static struct usb_driver auerswald_driver; + + +/*-------------------------------------------------------------------*/ +/* USB chain helper functions */ +/* -------------------------- */ + +/* completion function for chained urbs */ +static void auerchain_complete (struct urb * urb) +{ + unsigned long flags; + int result; + + /* get pointer to element and to chain */ + pauerchainelement_t acep = urb->context; + pauerchain_t acp = acep->chain; + + /* restore original entries in urb */ + urb->context = acep->context; + urb->complete = acep->complete; + + dbg ("auerchain_complete called"); + + /* call original completion function + NOTE: this function may lead to more urbs submitted into the chain. + (no chain lock at calling complete()!) + acp->active != NULL is protecting us against recursion.*/ + urb->complete (urb); + + /* detach element from chain data structure */ + spin_lock_irqsave (&acp->lock, flags); + if (acp->active != acep) /* paranoia debug check */ + dbg ("auerchain_complete: completion on non-active element called!"); + else + acp->active = NULL; + + /* add the used chain element to the list of free elements */ + list_add_tail (&acep->list, &acp->free_list); + acep = NULL; + + /* is there a new element waiting in the chain? */ + if (!acp->active && !list_empty (&acp->waiting_list)) { + /* yes: get the entry */ + struct list_head *tmp = acp->waiting_list.next; + list_del (tmp); + acep = list_entry (tmp, auerchainelement_t, list); + acp->active = acep; + } + spin_unlock_irqrestore (&acp->lock, flags); + + /* submit the new urb */ + if (acep) { + urb = acep->urbp; + dbg ("auerchain_complete: submitting next urb from chain"); + urb->status = 0; /* needed! */ + result = usb_submit_urb(urb, GFP_ATOMIC); + + /* check for submit errors */ + if (result) { + urb->status = result; + dbg("auerchain_complete: usb_submit_urb with error code %d", result); + /* and do error handling via *this* completion function (recursive) */ + auerchain_complete( urb); + } + } else { + /* simple return without submitting a new urb. 
+ The empty chain is detected with acp->active == NULL. */ + }; +} + + +/* submit function for chained urbs + this function may be called from completion context or from user space! + early = 1 -> submit in front of chain +*/ +static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early) +{ + int result; + unsigned long flags; + pauerchainelement_t acep = NULL; + + dbg ("auerchain_submit_urb called"); + + /* try to get a chain element */ + spin_lock_irqsave (&acp->lock, flags); + if (!list_empty (&acp->free_list)) { + /* yes: get the entry */ + struct list_head *tmp = acp->free_list.next; + list_del (tmp); + acep = list_entry (tmp, auerchainelement_t, list); + } + spin_unlock_irqrestore (&acp->lock, flags); + + /* if no chain element available: return with error */ + if (!acep) { + return -ENOMEM; + } + + /* fill in the new chain element values */ + acep->chain = acp; + acep->context = urb->context; + acep->complete = urb->complete; + acep->urbp = urb; + INIT_LIST_HEAD (&acep->list); + + /* modify urb */ + urb->context = acep; + urb->complete = auerchain_complete; + urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */ + + /* add element to chain - or start it immediately */ + spin_lock_irqsave (&acp->lock, flags); + if (acp->active) { + /* there is traffic in the chain, simple add element to chain */ + if (early) { + dbg ("adding new urb to head of chain"); + list_add (&acep->list, &acp->waiting_list); + } else { + dbg ("adding new urb to end of chain"); + list_add_tail (&acep->list, &acp->waiting_list); + } + acep = NULL; + } else { + /* the chain is empty. Prepare restart */ + acp->active = acep; + } + /* Spin has to be removed before usb_submit_urb! */ + spin_unlock_irqrestore (&acp->lock, flags); + + /* Submit urb if immediate restart */ + if (acep) { + dbg("submitting urb immediate"); + urb->status = 0; /* needed! */ + result = usb_submit_urb(urb, GFP_ATOMIC); + /* check for submit errors */ + if (result) { + urb->status = result; + dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result); + /* and do error handling via completion function */ + auerchain_complete( urb); + } + } + + return 0; +} + +/* submit function for chained urbs + this function may be called from completion context or from user space! +*/ +static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb) +{ + return auerchain_submit_urb_list (acp, urb, 0); +} + +/* cancel an urb which is submitted to the chain + the result is 0 if the urb is cancelled, or -EINPROGRESS if + the function is successfully started. +*/ +static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb) +{ + unsigned long flags; + struct urb * urbp; + pauerchainelement_t acep; + struct list_head *tmp; + + dbg ("auerchain_unlink_urb called"); + + /* search the chain of waiting elements */ + spin_lock_irqsave (&acp->lock, flags); + list_for_each (tmp, &acp->waiting_list) { + acep = list_entry (tmp, auerchainelement_t, list); + if (acep->urbp == urb) { + list_del (tmp); + urb->context = acep->context; + urb->complete = acep->complete; + list_add_tail (&acep->list, &acp->free_list); + spin_unlock_irqrestore (&acp->lock, flags); + dbg ("unlink waiting urb"); + urb->status = -ENOENT; + urb->complete (urb); + return 0; + } + } + /* not found. 
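Taken together, auerchain_complete() and auerchain_submit_urb_list() above implement one pattern: at most one URB is in flight per chain, later submissions wait on a list, and each completion pulls the next waiting entry and resubmits it with GFP_ATOMIC. A stripped-down sketch of that pattern (hypothetical names; the real code additionally saves and restores each URB's own context/complete pointers and recycles chain elements onto a free list):

    struct chain_elem {
        struct urb       *urb;
        struct list_head list;
    };

    struct chain {
        spinlock_t       lock;
        bool             busy;      /* an URB is currently in flight */
        struct list_head waiting;   /* queued chain_elem entries */
    };

    static int chain_submit(struct chain *c, struct chain_elem *elem)
    {
        unsigned long flags;
        bool start;

        spin_lock_irqsave(&c->lock, flags);
        start = !c->busy;
        if (start)
            c->busy = true;
        else
            list_add_tail(&elem->list, &c->waiting);
        spin_unlock_irqrestore(&c->lock, flags);

        /* submit outside the lock so the completion handler can take it */
        return start ? usb_submit_urb(elem->urb, GFP_ATOMIC) : 0;
    }

    /* in this sketch every chained URB's ->context points at its struct chain */
    static void chain_complete(struct urb *urb)
    {
        struct chain *c = urb->context;
        struct chain_elem *next = NULL;
        unsigned long flags;

        /* ... per-URB handling goes here ... */

        spin_lock_irqsave(&c->lock, flags);
        if (list_empty(&c->waiting)) {
            c->busy = false;        /* chain drained */
        } else {
            next = list_first_entry(&c->waiting, struct chain_elem, list);
            list_del(&next->list);
        }
        spin_unlock_irqrestore(&c->lock, flags);

        if (next)
            usb_submit_urb(next->urb, GFP_ATOMIC);
    }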
*/ + spin_unlock_irqrestore (&acp->lock, flags); + + /* get the active urb */ + acep = acp->active; + if (acep) { + urbp = acep->urbp; + + /* check if we have to cancel the active urb */ + if (urbp == urb) { + /* note that there is a race condition between the check above + and the unlink() call because of no lock. This race is harmless, + because the usb module will detect the unlink() after completion. + We can't use the acp->lock here because the completion function + wants to grab it. + */ + dbg ("unlink active urb"); + return usb_unlink_urb (urbp); + } + } + + /* not found anyway + ... is some kind of success + */ + dbg ("urb to unlink not found in chain"); + return 0; +} + +/* cancel all urbs which are in the chain. + this function must not be called from interrupt or completion handler. +*/ +static void auerchain_unlink_all (pauerchain_t acp) +{ + unsigned long flags; + struct urb * urbp; + pauerchainelement_t acep; + + dbg ("auerchain_unlink_all called"); + + /* clear the chain of waiting elements */ + spin_lock_irqsave (&acp->lock, flags); + while (!list_empty (&acp->waiting_list)) { + /* get the next entry */ + struct list_head *tmp = acp->waiting_list.next; + list_del (tmp); + acep = list_entry (tmp, auerchainelement_t, list); + urbp = acep->urbp; + urbp->context = acep->context; + urbp->complete = acep->complete; + list_add_tail (&acep->list, &acp->free_list); + spin_unlock_irqrestore (&acp->lock, flags); + dbg ("unlink waiting urb"); + urbp->status = -ENOENT; + urbp->complete (urbp); + spin_lock_irqsave (&acp->lock, flags); + } + spin_unlock_irqrestore (&acp->lock, flags); + + /* clear the active urb */ + acep = acp->active; + if (acep) { + urbp = acep->urbp; + dbg ("unlink active urb"); + usb_kill_urb (urbp); + } +} + + +/* free the chain. + this function must not be called from interrupt or completion handler. +*/ +static void auerchain_free (pauerchain_t acp) +{ + unsigned long flags; + pauerchainelement_t acep; + + dbg ("auerchain_free called"); + + /* first, cancel all pending urbs */ + auerchain_unlink_all (acp); + + /* free the elements */ + spin_lock_irqsave (&acp->lock, flags); + while (!list_empty (&acp->free_list)) { + /* get the next entry */ + struct list_head *tmp = acp->free_list.next; + list_del (tmp); + spin_unlock_irqrestore (&acp->lock, flags); + acep = list_entry (tmp, auerchainelement_t, list); + kfree (acep); + spin_lock_irqsave (&acp->lock, flags); + } + spin_unlock_irqrestore (&acp->lock, flags); +} + + +/* Init the chain control structure */ +static void auerchain_init (pauerchain_t acp) +{ + /* init the chain data structure */ + acp->active = NULL; + spin_lock_init (&acp->lock); + INIT_LIST_HEAD (&acp->waiting_list); + INIT_LIST_HEAD (&acp->free_list); +} + +/* setup a chain. 
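+   Pre-allocates numElements chain elements and puts them on the free list.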
+ It is assumed that there is no concurrency while setting up the chain + requirement: auerchain_init() +*/ +static int auerchain_setup (pauerchain_t acp, unsigned int numElements) +{ + pauerchainelement_t acep; + + dbg ("auerchain_setup called with %d elements", numElements); + + /* fill the list of free elements */ + for (;numElements; numElements--) { + acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL); + if (!acep) + goto ac_fail; + INIT_LIST_HEAD (&acep->list); + list_add_tail (&acep->list, &acp->free_list); + } + return 0; + +ac_fail:/* free the elements */ + while (!list_empty (&acp->free_list)) { + /* get the next entry */ + struct list_head *tmp = acp->free_list.next; + list_del (tmp); + acep = list_entry (tmp, auerchainelement_t, list); + kfree (acep); + } + return -ENOMEM; +} + + +/* completion handler for synchronous chained URBs */ +static void auerchain_blocking_completion (struct urb *urb) +{ + pauerchain_chs_t pchs = urb->context; + pchs->done = 1; + wmb(); + wake_up (&pchs->wqh); +} + + +/* Starts chained urb and waits for completion or timeout */ +static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length) +{ + auerchain_chs_t chs; + int status; + + dbg ("auerchain_start_wait_urb called"); + init_waitqueue_head (&chs.wqh); + chs.done = 0; + + urb->context = &chs; + status = auerchain_submit_urb (acp, urb); + if (status) + /* something went wrong */ + return status; + + timeout = wait_event_timeout(chs.wqh, chs.done, timeout); + + if (!timeout && !chs.done) { + if (urb->status != -EINPROGRESS) { /* No callback?!! */ + dbg ("auerchain_start_wait_urb: raced timeout"); + status = urb->status; + } else { + dbg ("auerchain_start_wait_urb: timeout"); + auerchain_unlink_urb (acp, urb); /* remove urb safely */ + status = -ETIMEDOUT; + } + } else + status = urb->status; + + if (status >= 0) + *actual_length = urb->actual_length; + + return status; +} + + +/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion + acp: pointer to the auerchain + dev: pointer to the usb device to send the message to + pipe: endpoint "pipe" to send the message to + request: USB message request value + requesttype: USB message request type value + value: USB message value + index: USB message index value + data: pointer to the data to send + size: length in bytes of the data to send + timeout: time to wait for the message to complete before timing out (if 0 the wait is forever) + + This function sends a simple control message to a specified endpoint + and waits for the message to complete, or timeout. + + If successful, it returns the transferred length, otherwise a negative error number. + + Don't use this function from within an interrupt context, like a + bottom half handler. 
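+ It blocks until the transfer completes or the timeout expires, so the caller must be able to sleep.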
If you need an asynchronous message, or need to send + a message from within interrupt context, use auerchain_submit_urb() +*/ +static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, + __u16 value, __u16 index, void *data, __u16 size, int timeout) +{ + int ret; + struct usb_ctrlrequest *dr; + struct urb *urb; + int uninitialized_var(length); + + dbg ("auerchain_control_msg"); + dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL); + if (!dr) + return -ENOMEM; + urb = usb_alloc_urb (0, GFP_KERNEL); + if (!urb) { + kfree (dr); + return -ENOMEM; + } + + dr->bRequestType = requesttype; + dr->bRequest = request; + dr->wValue = cpu_to_le16 (value); + dr->wIndex = cpu_to_le16 (index); + dr->wLength = cpu_to_le16 (size); + + usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */ + auerchain_blocking_completion, NULL); + ret = auerchain_start_wait_urb (acp, urb, timeout, &length); + + usb_free_urb (urb); + kfree (dr); + + if (ret < 0) + return ret; + else + return length; +} + + +/*-------------------------------------------------------------------*/ +/* Buffer List helper functions */ + +/* free a single auerbuf */ +static void auerbuf_free (pauerbuf_t bp) +{ + kfree(bp->bufp); + kfree(bp->dr); + usb_free_urb(bp->urbp); + kfree(bp); +} + +/* free the buffers from an auerbuf list */ +static void auerbuf_free_list (struct list_head *q) +{ + struct list_head *tmp; + struct list_head *p; + pauerbuf_t bp; + + dbg ("auerbuf_free_list"); + for (p = q->next; p != q;) { + bp = list_entry (p, auerbuf_t, buff_list); + tmp = p->next; + list_del (p); + p = tmp; + auerbuf_free (bp); + } +} + +/* init the members of a list control block */ +static void auerbuf_init (pauerbufctl_t bcp) +{ + dbg ("auerbuf_init"); + spin_lock_init (&bcp->lock); + INIT_LIST_HEAD (&bcp->free_buff_list); + INIT_LIST_HEAD (&bcp->rec_buff_list); +} + +/* free all buffers from an auerbuf chain */ +static void auerbuf_free_buffers (pauerbufctl_t bcp) +{ + unsigned long flags; + dbg ("auerbuf_free_buffers"); + + spin_lock_irqsave (&bcp->lock, flags); + + auerbuf_free_list (&bcp->free_buff_list); + auerbuf_free_list (&bcp->rec_buff_list); + + spin_unlock_irqrestore (&bcp->lock, flags); +} + +/* setup a list of buffers */ +/* requirement: auerbuf_init() */ +static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize) +{ + pauerbuf_t bep = NULL; + + dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize); + + /* fill the list of free elements */ + for (;numElements; numElements--) { + bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL); + if (!bep) + goto bl_fail; + bep->list = bcp; + INIT_LIST_HEAD (&bep->buff_list); + bep->bufp = kmalloc (bufsize, GFP_KERNEL); + if (!bep->bufp) + goto bl_fail; + bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL); + if (!bep->dr) + goto bl_fail; + bep->urbp = usb_alloc_urb (0, GFP_KERNEL); + if (!bep->urbp) + goto bl_fail; + list_add_tail (&bep->buff_list, &bcp->free_buff_list); + } + return 0; + +bl_fail:/* not enough memory. 
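+	   bep is NULL or only partially initialized at this point.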
Free allocated elements */ + dbg ("auerbuf_setup: no more memory"); + auerbuf_free(bep); + auerbuf_free_buffers (bcp); + return -ENOMEM; +} + +/* insert a used buffer into the free list */ +static void auerbuf_releasebuf( pauerbuf_t bp) +{ + unsigned long flags; + pauerbufctl_t bcp = bp->list; + bp->retries = 0; + + dbg ("auerbuf_releasebuf called"); + spin_lock_irqsave (&bcp->lock, flags); + list_add_tail (&bp->buff_list, &bcp->free_buff_list); + spin_unlock_irqrestore (&bcp->lock, flags); +} + + +/*-------------------------------------------------------------------*/ +/* Completion handlers */ + +/* Values of urb->status or results of usb_submit_urb(): +0 Initial, OK +-EINPROGRESS during submission until end +-ENOENT if urb is unlinked +-ETIME Device did not respond +-ENOMEM Memory Overflow +-ENODEV Specified USB-device or bus doesn't exist +-ENXIO URB already queued +-EINVAL a) Invalid transfer type specified (or not supported) + b) Invalid interrupt interval (0n256) +-EAGAIN a) Specified ISO start frame too early + b) (using ISO-ASAP) Too much scheduled for the future wait some time and try again. +-EFBIG Too much ISO frames requested (currently uhci900) +-EPIPE Specified pipe-handle/Endpoint is already stalled +-EMSGSIZE Endpoint message size is zero, do interface/alternate setting +-EPROTO a) Bitstuff error + b) Unknown USB error +-EILSEQ CRC mismatch +-ENOSR Buffer error +-EREMOTEIO Short packet detected +-EXDEV ISO transfer only partially completed look at individual frame status for details +-EINVAL ISO madness, if this happens: Log off and go home +-EOVERFLOW babble +*/ + +/* check if a status code allows a retry */ +static int auerswald_status_retry (int status) +{ + switch (status) { + case 0: + case -ETIME: + case -EOVERFLOW: + case -EAGAIN: + case -EPIPE: + case -EPROTO: + case -EILSEQ: + case -ENOSR: + case -EREMOTEIO: + return 1; /* do a retry */ + } + return 0; /* no retry possible */ +} + +/* Completion of asynchronous write block */ +static void auerchar_ctrlwrite_complete (struct urb * urb) +{ + pauerbuf_t bp = urb->context; + pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); + dbg ("auerchar_ctrlwrite_complete called"); + + /* reuse the buffer */ + auerbuf_releasebuf (bp); + /* Wake up all processes waiting for a buffer */ + wake_up (&cp->bufferwait); +} + +/* Completion handler for dummy retry packet */ +static void auerswald_ctrlread_wretcomplete (struct urb * urb) +{ + pauerbuf_t bp = urb->context; + pauerswald_t cp; + int ret; + int status = urb->status; + + dbg ("auerswald_ctrlread_wretcomplete called"); + dbg ("complete with status: %d", status); + cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); + + /* check if it is possible to advance */ + if (!auerswald_status_retry(status) || !cp->usbdev) { + /* reuse the buffer */ + err ("control dummy: transmission error %d, can not retry", status); + auerbuf_releasebuf (bp); + /* Wake up all processes waiting for a buffer */ + wake_up (&cp->bufferwait); + return; + } + + /* fill the control message */ + bp->dr->bRequestType = AUT_RREQ; + bp->dr->bRequest = AUV_RBLOCK; + bp->dr->wLength = bp->dr->wValue; /* temporary stored */ + bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */ + /* bp->dr->index = channel id; remains */ + usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0), + (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength), + auerswald_ctrlread_complete,bp); + + /* submit the control msg as next 
paket */ + ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1); + if (ret) { + dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret); + bp->urbp->status = ret; + auerswald_ctrlread_complete (bp->urbp); + } +} + +/* completion handler for receiving of control messages */ +static void auerswald_ctrlread_complete (struct urb * urb) +{ + unsigned int serviceid; + pauerswald_t cp; + pauerscon_t scp; + pauerbuf_t bp = urb->context; + int status = urb->status; + int ret; + + dbg ("auerswald_ctrlread_complete called"); + + cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); + + /* check if there is valid data in this urb */ + if (status) { + dbg ("complete with non-zero status: %d", status); + /* should we do a retry? */ + if (!auerswald_status_retry(status) + || !cp->usbdev + || (cp->version < AUV_RETRY) + || (bp->retries >= AU_RETRIES)) { + /* reuse the buffer */ + err ("control read: transmission error %d, can not retry", status); + auerbuf_releasebuf (bp); + /* Wake up all processes waiting for a buffer */ + wake_up (&cp->bufferwait); + return; + } + bp->retries++; + dbg ("Retry count = %d", bp->retries); + /* send a long dummy control-write-message to allow device firmware to react */ + bp->dr->bRequestType = AUT_WREQ; + bp->dr->bRequest = AUV_DUMMY; + bp->dr->wValue = bp->dr->wLength; /* temporary storage */ + // bp->dr->wIndex channel ID remains + bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */ + usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0), + (unsigned char*)bp->dr, bp->bufp, 32, + auerswald_ctrlread_wretcomplete,bp); + + /* submit the control msg as next paket */ + ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1); + if (ret) { + dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret); + bp->urbp->status = ret; + auerswald_ctrlread_wretcomplete (bp->urbp); + } + return; + } + + /* get the actual bytecount (incl. headerbyte) */ + bp->len = urb->actual_length; + serviceid = bp->bufp[0] & AUH_TYPEMASK; + dbg ("Paket with serviceid %d and %d bytes received", serviceid, bp->len); + + /* dispatch the paket */ + scp = cp->services[serviceid]; + if (scp) { + /* look, Ma, a listener! */ + scp->dispatch (scp, bp); + } + + /* release the paket */ + auerbuf_releasebuf (bp); + /* Wake up all processes waiting for a buffer */ + wake_up (&cp->bufferwait); +} + +/*-------------------------------------------------------------------*/ +/* Handling of Interrupt Endpoint */ +/* This interrupt Endpoint is used to inform the host about waiting + messages from the USB device. +*/ +/* int completion handler. 
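+   Decodes the 'block ready' notification from the device and, if a receive buffer is available, queues a chained control read for the announced service channel.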
*/ +static void auerswald_int_complete (struct urb * urb) +{ + unsigned long flags; + unsigned int channelid; + unsigned int bytecount; + int ret; + int status = urb->status; + pauerbuf_t bp = NULL; + pauerswald_t cp = urb->context; + + dbg ("%s called", __func__); + + switch (status) { + case 0: + /* success */ + break; + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* this urb is terminated, clean up */ + dbg("%s - urb shutting down with status: %d", __func__, status); + return; + default: + dbg("%s - nonzero urb status received: %d", __func__, status); + goto exit; + } + + /* check if all needed data was received */ + if (urb->actual_length < AU_IRQMINSIZE) { + dbg ("invalid data length received: %d bytes", urb->actual_length); + goto exit; + } + + /* check the command code */ + if (cp->intbufp[0] != AU_IRQCMDID) { + dbg ("invalid command received: %d", cp->intbufp[0]); + goto exit; + } + + /* check the command type */ + if (cp->intbufp[1] != AU_BLOCKRDY) { + dbg ("invalid command type received: %d", cp->intbufp[1]); + goto exit; + } + + /* now extract the information */ + channelid = cp->intbufp[2]; + bytecount = (unsigned char)cp->intbufp[3]; + bytecount |= (unsigned char)cp->intbufp[4] << 8; + + /* check the channel id */ + if (channelid >= AUH_TYPESIZE) { + dbg ("invalid channel id received: %d", channelid); + goto exit; + } + + /* check the byte count */ + if (bytecount > (cp->maxControlLength+AUH_SIZE)) { + dbg ("invalid byte count received: %d", bytecount); + goto exit; + } + dbg ("Service Channel = %d", channelid); + dbg ("Byte Count = %d", bytecount); + + /* get a buffer for the next data paket */ + spin_lock_irqsave (&cp->bufctl.lock, flags); + if (!list_empty (&cp->bufctl.free_buff_list)) { + /* yes: get the entry */ + struct list_head *tmp = cp->bufctl.free_buff_list.next; + list_del (tmp); + bp = list_entry (tmp, auerbuf_t, buff_list); + } + spin_unlock_irqrestore (&cp->bufctl.lock, flags); + + /* if no buffer available: skip it */ + if (!bp) { + dbg ("auerswald_int_complete: no data buffer available"); + /* can we do something more? + This is a big problem: if this int packet is ignored, the + device will wait forever and not signal any more data. + The only real solution is: having enough buffers! + Or perhaps temporary disabling the int endpoint? + */ + goto exit; + } + + /* fill the control message */ + bp->dr->bRequestType = AUT_RREQ; + bp->dr->bRequest = AUV_RBLOCK; + bp->dr->wValue = cpu_to_le16 (0); + bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT); + bp->dr->wLength = cpu_to_le16 (bytecount); + usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0), + (unsigned char*)bp->dr, bp->bufp, bytecount, + auerswald_ctrlread_complete,bp); + + /* submit the control msg */ + ret = auerchain_submit_urb (&cp->controlchain, bp->urbp); + if (ret) { + dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret); + bp->urbp->status = ret; + auerswald_ctrlread_complete( bp->urbp); + /* here applies the same problem as above: device locking! */ + } +exit: + ret = usb_submit_urb (urb, GFP_ATOMIC); + if (ret) + err ("%s - usb_submit_urb failed with result %d", + __func__, ret); +} + +/* int memory deallocation + NOTE: no mutex please! +*/ +static void auerswald_int_free (pauerswald_t cp) +{ + if (cp->inturbp) { + usb_free_urb(cp->inturbp); + cp->inturbp = NULL; + } + kfree(cp->intbufp); + cp->intbufp = NULL; +} + +/* This function is called to activate the interrupt + endpoint. 
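+ The interrupt urb and its data buffer are allocated on first use and the urb is (re)submitted.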
This function returns 0 if successful or an error code. + NOTE: no mutex please! +*/ +static int auerswald_int_open (pauerswald_t cp) +{ + int ret; + struct usb_host_endpoint *ep; + int irqsize; + dbg ("auerswald_int_open"); + + ep = cp->usbdev->ep_in[AU_IRQENDP]; + if (!ep) { + ret = -EFAULT; + goto intoend; + } + irqsize = le16_to_cpu(ep->desc.wMaxPacketSize); + cp->irqsize = irqsize; + + /* allocate the urb and data buffer */ + if (!cp->inturbp) { + cp->inturbp = usb_alloc_urb (0, GFP_KERNEL); + if (!cp->inturbp) { + ret = -ENOMEM; + goto intoend; + } + } + if (!cp->intbufp) { + cp->intbufp = kmalloc (irqsize, GFP_KERNEL); + if (!cp->intbufp) { + ret = -ENOMEM; + goto intoend; + } + } + /* setup urb */ + usb_fill_int_urb (cp->inturbp, cp->usbdev, + usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp, + irqsize, auerswald_int_complete, cp, ep->desc.bInterval); + /* start the urb */ + cp->inturbp->status = 0; /* needed! */ + ret = usb_submit_urb (cp->inturbp, GFP_KERNEL); + +intoend: + if (ret < 0) { + /* activation of interrupt endpoint has failed. Now clean up. */ + dbg ("auerswald_int_open: activation of int endpoint failed"); + + /* deallocate memory */ + auerswald_int_free (cp); + } + return ret; +} + +/* This function is called to deactivate the interrupt + endpoint. This function returns 0 if successful or an error code. + NOTE: no mutex please! +*/ +static void auerswald_int_release (pauerswald_t cp) +{ + dbg ("auerswald_int_release"); + + /* stop the int endpoint */ + usb_kill_urb (cp->inturbp); + + /* deallocate memory */ + auerswald_int_free (cp); +} + +/* --------------------------------------------------------------------- */ +/* Helper functions */ + +/* wake up waiting readers */ +static void auerchar_disconnect (pauerscon_t scp) +{ + pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext))); + dbg ("auerchar_disconnect called"); + ccp->removed = 1; + wake_up (&ccp->readwait); +} + + +/* dispatch a read paket to a waiting character device */ +static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp) +{ + unsigned long flags; + pauerchar_t ccp; + pauerbuf_t newbp = NULL; + char * charp; + dbg ("auerchar_ctrlread_dispatch called"); + ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext))); + + /* get a read buffer from character device context */ + spin_lock_irqsave (&ccp->bufctl.lock, flags); + if (!list_empty (&ccp->bufctl.free_buff_list)) { + /* yes: get the entry */ + struct list_head *tmp = ccp->bufctl.free_buff_list.next; + list_del (tmp); + newbp = list_entry (tmp, auerbuf_t, buff_list); + } + spin_unlock_irqrestore (&ccp->bufctl.lock, flags); + + if (!newbp) { + dbg ("No read buffer available, discard paket!"); + return; /* no buffer, no dispatch */ + } + + /* copy information to new buffer element + (all buffers have the same length) */ + charp = newbp->bufp; + newbp->bufp = bp->bufp; + bp->bufp = charp; + newbp->len = bp->len; + + /* insert new buffer in read list */ + spin_lock_irqsave (&ccp->bufctl.lock, flags); + list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list); + spin_unlock_irqrestore (&ccp->bufctl.lock, flags); + dbg ("read buffer appended to rec_list"); + + /* wake up pending synchronous reads */ + wake_up (&ccp->readwait); +} + + +/* Delete an auerswald driver context */ +static void auerswald_delete( pauerswald_t cp) +{ + dbg( "auerswald_delete"); + if (cp == NULL) + return; + + /* Wake up all processes waiting for a buffer */ + wake_up (&cp->bufferwait); + + /* 
Cleaning up */ + auerswald_int_release (cp); + auerchain_free (&cp->controlchain); + auerbuf_free_buffers (&cp->bufctl); + + /* release the memory */ + kfree( cp); +} + + +/* Delete an auerswald character context */ +static void auerchar_delete( pauerchar_t ccp) +{ + dbg ("auerchar_delete"); + if (ccp == NULL) + return; + + /* wake up pending synchronous reads */ + ccp->removed = 1; + wake_up (&ccp->readwait); + + /* remove the read buffer */ + if (ccp->readbuf) { + auerbuf_releasebuf (ccp->readbuf); + ccp->readbuf = NULL; + } + + /* remove the character buffers */ + auerbuf_free_buffers (&ccp->bufctl); + + /* release the memory */ + kfree( ccp); +} + + +/* add a new service to the device + scp->id must be set! + return: 0 if OK, else error code +*/ +static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp) +{ + int ret; + + /* is the device available? */ + if (!cp->usbdev) { + dbg ("usbdev == NULL"); + return -EIO; /*no: can not add a service, sorry*/ + } + + /* is the service available? */ + if (cp->services[scp->id]) { + dbg ("service is busy"); + return -EBUSY; + } + + /* device is available, service is free */ + cp->services[scp->id] = scp; + + /* register service in device */ + ret = auerchain_control_msg( + &cp->controlchain, /* pointer to control chain */ + cp->usbdev, /* pointer to device */ + usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */ + AUV_CHANNELCTL, /* USB message request value */ + AUT_WREQ, /* USB message request type value */ + 0x01, /* open USB message value */ + scp->id, /* USB message index value */ + NULL, /* pointer to the data to send */ + 0, /* length in bytes of the data to send */ + HZ * 2); /* time to wait for the message to complete before timing out */ + if (ret < 0) { + dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret); + /* undo above actions */ + cp->services[scp->id] = NULL; + return ret; + } + + dbg ("auerswald_addservice: channel open OK"); + return 0; +} + + +/* remove a service from the device + scp->id must be set! 
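+ If the device is still present, a channel-close control message is sent before the service slot is cleared.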
*/ +static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp) +{ + dbg ("auerswald_removeservice called"); + + /* check if we have a service allocated */ + if (scp->id == AUH_UNASSIGNED) + return; + + /* If there is a device: close the channel */ + if (cp->usbdev) { + /* Close the service channel inside the device */ + int ret = auerchain_control_msg( + &cp->controlchain, /* pointer to control chain */ + cp->usbdev, /* pointer to device */ + usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */ + AUV_CHANNELCTL, /* USB message request value */ + AUT_WREQ, /* USB message request type value */ + 0x00, // close /* USB message value */ + scp->id, /* USB message index value */ + NULL, /* pointer to the data to send */ + 0, /* length in bytes of the data to send */ + HZ * 2); /* time to wait for the message to complete before timing out */ + if (ret < 0) { + dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret); + } + else { + dbg ("auerswald_removeservice: channel close OK"); + } + } + + /* remove the service from the device */ + cp->services[scp->id] = NULL; + scp->id = AUH_UNASSIGNED; +} + + +/* --------------------------------------------------------------------- */ +/* Char device functions */ + +/* Open a new character device */ +static int auerchar_open (struct inode *inode, struct file *file) +{ + int dtindex = iminor(inode); + pauerswald_t cp = NULL; + pauerchar_t ccp = NULL; + struct usb_interface *intf; + int ret; + + /* minor number in range? */ + if (dtindex < 0) { + return -ENODEV; + } + intf = usb_find_interface(&auerswald_driver, dtindex); + if (!intf) { + return -ENODEV; + } + + /* usb device available? */ + cp = usb_get_intfdata (intf); + if (cp == NULL) { + return -ENODEV; + } + if (mutex_lock_interruptible(&cp->mutex)) { + return -ERESTARTSYS; + } + + /* we have access to the device. 
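+	   cp->mutex is held from here until the open succeeds or fails.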
Now lets allocate memory */ + ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL); + if (ccp == NULL) { + err ("out of memory"); + ret = -ENOMEM; + goto ofail; + } + + /* Initialize device descriptor */ + mutex_init(&ccp->mutex); + mutex_init(&ccp->readmutex); + auerbuf_init (&ccp->bufctl); + ccp->scontext.id = AUH_UNASSIGNED; + ccp->scontext.dispatch = auerchar_ctrlread_dispatch; + ccp->scontext.disconnect = auerchar_disconnect; + init_waitqueue_head (&ccp->readwait); + + ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE); + if (ret) { + goto ofail; + } + + cp->open_count++; + ccp->auerdev = cp; + dbg("open %s as /dev/%s", cp->dev_desc, cp->name); + mutex_unlock(&cp->mutex); + + /* file IO stuff */ + file->f_pos = 0; + file->private_data = ccp; + return nonseekable_open(inode, file); + + /* Error exit */ +ofail: mutex_unlock(&cp->mutex); + auerchar_delete (ccp); + return ret; +} + + +/* IOCTL functions */ +static long auerchar_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + pauerchar_t ccp = (pauerchar_t) file->private_data; + int ret = 0; + audevinfo_t devinfo; + pauerswald_t cp = NULL; + unsigned int u; + unsigned int __user *user_arg = (unsigned int __user *)arg; + + dbg ("ioctl"); + + /* get the mutexes */ + if (mutex_lock_interruptible(&ccp->mutex)) { + return -ERESTARTSYS; + } + cp = ccp->auerdev; + if (!cp) { + mutex_unlock(&ccp->mutex); + return -ENODEV; + } + if (mutex_lock_interruptible(&cp->mutex)) { + mutex_unlock(&ccp->mutex); + return -ERESTARTSYS; + } + + /* Check for removal */ + if (!cp->usbdev) { + mutex_unlock(&cp->mutex); + mutex_unlock(&ccp->mutex); + return -ENODEV; + } + lock_kernel(); + switch (cmd) { + + /* return != 0 if Transmitt channel ready to send */ + case IOCTL_AU_TXREADY: + dbg ("IOCTL_AU_TXREADY"); + u = ccp->auerdev + && (ccp->scontext.id != AUH_UNASSIGNED) + && !list_empty (&cp->bufctl.free_buff_list); + ret = put_user (u, user_arg); + break; + + /* return != 0 if connected to a service channel */ + case IOCTL_AU_CONNECT: + dbg ("IOCTL_AU_CONNECT"); + u = (ccp->scontext.id != AUH_UNASSIGNED); + ret = put_user (u, user_arg); + break; + + /* return != 0 if Receive Data available */ + case IOCTL_AU_RXAVAIL: + dbg ("IOCTL_AU_RXAVAIL"); + if (ccp->scontext.id == AUH_UNASSIGNED) { + ret = -EIO; + break; + } + u = 0; /* no data */ + if (ccp->readbuf) { + int restlen = ccp->readbuf->len - ccp->readoffset; + if (restlen > 0) + u = 1; + } + if (!u) { + if (!list_empty (&ccp->bufctl.rec_buff_list)) { + u = 1; + } + } + ret = put_user (u, user_arg); + break; + + /* return the max. 
buffer length for the device */ + case IOCTL_AU_BUFLEN: + dbg ("IOCTL_AU_BUFLEN"); + u = cp->maxControlLength; + ret = put_user (u, user_arg); + break; + + /* requesting a service channel */ + case IOCTL_AU_SERVREQ: + dbg ("IOCTL_AU_SERVREQ"); + /* requesting a service means: release the previous one first */ + auerswald_removeservice (cp, &ccp->scontext); + /* get the channel number */ + ret = get_user (u, user_arg); + if (ret) { + break; + } + if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) { + ret = -EIO; + break; + } + dbg ("auerchar service request parameters are ok"); + ccp->scontext.id = u; + + /* request the service now */ + ret = auerswald_addservice (cp, &ccp->scontext); + if (ret) { + /* no: revert service entry */ + ccp->scontext.id = AUH_UNASSIGNED; + } + break; + + /* get a string descriptor for the device */ + case IOCTL_AU_DEVINFO: + dbg ("IOCTL_AU_DEVINFO"); + if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) { + ret = -EFAULT; + break; + } + u = strlen(cp->dev_desc)+1; + if (u > devinfo.bsize) { + u = devinfo.bsize; + } + ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0; + break; + + /* get the max. string descriptor length */ + case IOCTL_AU_SLEN: + dbg ("IOCTL_AU_SLEN"); + u = AUSI_DLEN; + ret = put_user (u, user_arg); + break; + + default: + dbg ("IOCTL_AU_UNKNOWN"); + ret = -ENOTTY; + break; + } + unlock_kernel(); + /* release the mutexes */ + mutex_unlock(&cp->mutex); + mutex_unlock(&ccp->mutex); + return ret; +} + +/* Read data from the device */ +static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos) +{ + unsigned long flags; + pauerchar_t ccp = (pauerchar_t) file->private_data; + pauerbuf_t bp = NULL; + wait_queue_t wait; + + dbg ("auerchar_read"); + + /* Error checking */ + if (!ccp) + return -EIO; + if (*ppos) + return -ESPIPE; + if (count == 0) + return 0; + + /* get the mutex */ + if (mutex_lock_interruptible(&ccp->mutex)) + return -ERESTARTSYS; + + /* Can we expect to read something? */ + if (ccp->scontext.id == AUH_UNASSIGNED) { + mutex_unlock(&ccp->mutex); + return -EIO; + } + + /* only one reader per device allowed */ + if (mutex_lock_interruptible(&ccp->readmutex)) { + mutex_unlock(&ccp->mutex); + return -ERESTARTSYS; + } + + /* read data from readbuf, if available */ +doreadbuf: + bp = ccp->readbuf; + if (bp) { + /* read the maximum bytes */ + int restlen = bp->len - ccp->readoffset; + if (restlen < 0) + restlen = 0; + if (count > restlen) + count = restlen; + if (count) { + if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) { + dbg ("auerswald_read: copy_to_user failed"); + mutex_unlock(&ccp->readmutex); + mutex_unlock(&ccp->mutex); + return -EFAULT; + } + } + /* advance the read offset */ + ccp->readoffset += count; + restlen -= count; + // reuse the read buffer + if (restlen <= 0) { + auerbuf_releasebuf (bp); + ccp->readbuf = NULL; + } + /* return with number of bytes read */ + if (count) { + mutex_unlock(&ccp->readmutex); + mutex_unlock(&ccp->mutex); + return count; + } + } + + /* a read buffer is not available. Try to get the next data block. 
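+	   If the receive list is empty as well, either return -EAGAIN (O_NONBLOCK) or sleep on readwait until new data is dispatched.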
*/ +doreadlist: + /* Preparing for sleep */ + init_waitqueue_entry (&wait, current); + set_current_state (TASK_INTERRUPTIBLE); + add_wait_queue (&ccp->readwait, &wait); + + bp = NULL; + spin_lock_irqsave (&ccp->bufctl.lock, flags); + if (!list_empty (&ccp->bufctl.rec_buff_list)) { + /* yes: get the entry */ + struct list_head *tmp = ccp->bufctl.rec_buff_list.next; + list_del (tmp); + bp = list_entry (tmp, auerbuf_t, buff_list); + } + spin_unlock_irqrestore (&ccp->bufctl.lock, flags); + + /* have we got data? */ + if (bp) { + ccp->readbuf = bp; + ccp->readoffset = AUH_SIZE; /* for headerbyte */ + set_current_state (TASK_RUNNING); + remove_wait_queue (&ccp->readwait, &wait); + goto doreadbuf; /* now we can read! */ + } + + /* no data available. Should we wait? */ + if (file->f_flags & O_NONBLOCK) { + dbg ("No read buffer available, returning -EAGAIN"); + set_current_state (TASK_RUNNING); + remove_wait_queue (&ccp->readwait, &wait); + mutex_unlock(&ccp->readmutex); + mutex_unlock(&ccp->mutex); + return -EAGAIN; /* nonblocking, no data available */ + } + + /* yes, we should wait! */ + mutex_unlock(&ccp->mutex); /* allow other operations while we wait */ + schedule(); + remove_wait_queue (&ccp->readwait, &wait); + if (signal_pending (current)) { + /* waked up by a signal */ + mutex_unlock(&ccp->readmutex); + return -ERESTARTSYS; + } + + /* Anything left to read? */ + if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) { + mutex_unlock(&ccp->readmutex); + return -EIO; + } + + if (mutex_lock_interruptible(&ccp->mutex)) { + mutex_unlock(&ccp->readmutex); + return -ERESTARTSYS; + } + + /* try to read the incoming data again */ + goto doreadlist; +} + + +/* Write a data block into the right service channel of the device */ +static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos) +{ + pauerchar_t ccp = (pauerchar_t) file->private_data; + pauerswald_t cp = NULL; + pauerbuf_t bp; + unsigned long flags; + int ret; + wait_queue_t wait; + + dbg ("auerchar_write %zd bytes", len); + + /* Error checking */ + if (!ccp) + return -EIO; + if (*ppos) + return -ESPIPE; + if (len == 0) + return 0; + +write_again: + /* get the mutex */ + if (mutex_lock_interruptible(&ccp->mutex)) + return -ERESTARTSYS; + + /* Can we expect to write something? */ + if (ccp->scontext.id == AUH_UNASSIGNED) { + mutex_unlock(&ccp->mutex); + return -EIO; + } + + cp = ccp->auerdev; + if (!cp) { + mutex_unlock(&ccp->mutex); + return -ERESTARTSYS; + } + if (mutex_lock_interruptible(&cp->mutex)) { + mutex_unlock(&ccp->mutex); + return -ERESTARTSYS; + } + if (!cp->usbdev) { + mutex_unlock(&cp->mutex); + mutex_unlock(&ccp->mutex); + return -EIO; + } + /* Prepare for sleep */ + init_waitqueue_entry (&wait, current); + set_current_state (TASK_INTERRUPTIBLE); + add_wait_queue (&cp->bufferwait, &wait); + + /* Try to get a buffer from the device pool. + We can't use a buffer from ccp->bufctl because the write + command will last beond a release() */ + bp = NULL; + spin_lock_irqsave (&cp->bufctl.lock, flags); + if (!list_empty (&cp->bufctl.free_buff_list)) { + /* yes: get the entry */ + struct list_head *tmp = cp->bufctl.free_buff_list.next; + list_del (tmp); + bp = list_entry (tmp, auerbuf_t, buff_list); + } + spin_unlock_irqrestore (&cp->bufctl.lock, flags); + + /* are there any buffers left? 
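+	   If not, either fail with -EAGAIN (O_NONBLOCK) or sleep on bufferwait and retry the whole write.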
*/ + if (!bp) { + mutex_unlock(&cp->mutex); + mutex_unlock(&ccp->mutex); + + /* NONBLOCK: don't wait */ + if (file->f_flags & O_NONBLOCK) { + set_current_state (TASK_RUNNING); + remove_wait_queue (&cp->bufferwait, &wait); + return -EAGAIN; + } + + /* BLOCKING: wait */ + schedule(); + remove_wait_queue (&cp->bufferwait, &wait); + if (signal_pending (current)) { + /* waked up by a signal */ + return -ERESTARTSYS; + } + goto write_again; + } else { + set_current_state (TASK_RUNNING); + remove_wait_queue (&cp->bufferwait, &wait); + } + + /* protect against too big write requests */ + if (len > cp->maxControlLength) + len = cp->maxControlLength; + + /* Fill the buffer */ + if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) { + dbg ("copy_from_user failed"); + auerbuf_releasebuf (bp); + /* Wake up all processes waiting for a buffer */ + wake_up (&cp->bufferwait); + mutex_unlock(&cp->mutex); + mutex_unlock(&ccp->mutex); + return -EFAULT; + } + + /* set the header byte */ + *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT; + + /* Set the transfer Parameters */ + bp->len = len+AUH_SIZE; + bp->dr->bRequestType = AUT_WREQ; + bp->dr->bRequest = AUV_WBLOCK; + bp->dr->wValue = cpu_to_le16 (0); + bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT); + bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE); + usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0), + (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE, + auerchar_ctrlwrite_complete, bp); + /* up we go */ + ret = auerchain_submit_urb (&cp->controlchain, bp->urbp); + mutex_unlock(&cp->mutex); + if (ret) { + dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret); + auerbuf_releasebuf (bp); + /* Wake up all processes waiting for a buffer */ + wake_up (&cp->bufferwait); + mutex_unlock(&ccp->mutex); + return -EIO; + } + else { + dbg ("auerchar_write: Write OK"); + mutex_unlock(&ccp->mutex); + return len; + } +} + + +/* Close a character device */ +static int auerchar_release (struct inode *inode, struct file *file) +{ + pauerchar_t ccp = (pauerchar_t) file->private_data; + pauerswald_t cp; + dbg("release"); + + mutex_lock(&ccp->mutex); + cp = ccp->auerdev; + if (cp) { + mutex_lock(&cp->mutex); + /* remove an open service */ + auerswald_removeservice (cp, &ccp->scontext); + /* detach from device */ + if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) { + /* usb device waits for removal */ + mutex_unlock(&cp->mutex); + auerswald_delete (cp); + } else { + mutex_unlock(&cp->mutex); + } + cp = NULL; + ccp->auerdev = NULL; + } + mutex_unlock(&ccp->mutex); + auerchar_delete (ccp); + + return 0; +} + + +/*----------------------------------------------------------------------*/ +/* File operation structure */ +static const struct file_operations auerswald_fops = +{ + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = auerchar_read, + .write = auerchar_write, + .unlocked_ioctl = auerchar_ioctl, + .open = auerchar_open, + .release = auerchar_release, +}; + +static struct usb_class_driver auerswald_class = { + .name = "auer%d", + .fops = &auerswald_fops, + .minor_base = AUER_MINOR_BASE, +}; + + +/* --------------------------------------------------------------------- */ +/* Special USB driver functions */ + +/* Probe if this driver wants to serve an USB device + + This entry point is called whenever a new device is attached to the bus. + Then the device driver has to create a new instance of its internal data + structures for the new device. 
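+ For this driver that instance is the auerswald_t structure allocated below.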
+ + The dev argument specifies the device context, which contains pointers + to all USB descriptors. The interface argument specifies the interface + number. If a USB driver wants to bind itself to a particular device and + interface it has to return a pointer. This pointer normally references + the device driver's context structure. + + Probing normally is done by checking the vendor and product identifications + or the class and subclass definitions. If they match the interface number + is compared with the ones supported by the driver. When probing is done + class based it might be necessary to parse some more USB descriptors because + the device properties can differ in a wide range. +*/ +static int auerswald_probe (struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *usbdev = interface_to_usbdev(intf); + pauerswald_t cp = NULL; + unsigned int u = 0; + __le16 *pbuf; + int ret; + + dbg ("probe: vendor id 0x%x, device id 0x%x", + le16_to_cpu(usbdev->descriptor.idVendor), + le16_to_cpu(usbdev->descriptor.idProduct)); + + /* we use only the first -and only- interface */ + if (intf->altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + /* allocate memory for our device and initialize it */ + cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL); + if (cp == NULL) { + err ("out of memory"); + goto pfail; + } + + /* Initialize device descriptor */ + mutex_init(&cp->mutex); + cp->usbdev = usbdev; + auerchain_init (&cp->controlchain); + auerbuf_init (&cp->bufctl); + init_waitqueue_head (&cp->bufferwait); + + ret = usb_register_dev(intf, &auerswald_class); + if (ret) { + err ("Not able to get a minor for this device."); + goto pfail; + } + + /* Give the device a name */ + sprintf (cp->name, "usb/auer%d", intf->minor); + + /* Store the index */ + cp->dtindex = intf->minor; + + /* Get the usb version of the device */ + cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice); + dbg ("Version is %X", cp->version); + + /* allow some time to settle the device */ + msleep(334); + + /* Try to get a suitable textual description of the device */ + /* Device name:*/ + ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1); + if (ret >= 0) { + u += ret; + /* Append Serial Number */ + memcpy(&cp->dev_desc[u], ",Ser# ", 6); + u += 6; + ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1); + if (ret >= 0) { + u += ret; + /* Append subscriber number */ + memcpy(&cp->dev_desc[u], ", ", 2); + u += 2; + ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1); + if (ret >= 0) { + u += ret; + } + } + } + cp->dev_desc[u] = '\0'; + info("device is a %s", cp->dev_desc); + + /* get the maximum allowed control transfer length */ + pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */ + if (!pbuf) { + err( "out of memory"); + goto pfail; + } + ret = usb_control_msg(cp->usbdev, /* pointer to device */ + usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */ + AUV_GETINFO, /* USB message request value */ + AUT_RREQ, /* USB message request type value */ + 0, /* USB message value */ + AUDI_MBCTRANS, /* USB message index value */ + pbuf, /* pointer to the receive buffer */ + 2, /* length of the buffer */ + 2000); /* time to wait for the message to complete before timing out */ + if (ret == 2) { + cp->maxControlLength = le16_to_cpup(pbuf); + kfree(pbuf); + dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength); + } else { + kfree(pbuf); + err("setup: getting max. 
allowed control transfer length failed with error %d", ret); + goto pfail; + } + + /* allocate a chain for the control messages */ + if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) { + err ("out of memory"); + goto pfail; + } + + /* allocate buffers for control messages */ + if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) { + err ("out of memory"); + goto pfail; + } + + /* start the interrupt endpoint */ + if (auerswald_int_open (cp)) { + err ("int endpoint failed"); + goto pfail; + } + + /* all OK */ + usb_set_intfdata (intf, cp); + return 0; + + /* Error exit: clean up the memory */ +pfail: auerswald_delete (cp); + return -EIO; +} + + +/* Disconnect driver from a served device + + This function is called whenever a device which was served by this driver + is disconnected. + + The argument dev specifies the device context and the driver_context + returns a pointer to the previously registered driver_context of the + probe function. After returning from the disconnect function the USB + framework completely deallocates all data structures associated with + this device. So especially the usb_device structure must not be used + any longer by the usb driver. +*/ +static void auerswald_disconnect (struct usb_interface *intf) +{ + pauerswald_t cp = usb_get_intfdata (intf); + unsigned int u; + + usb_set_intfdata (intf, NULL); + if (!cp) + return; + + /* give back our USB minor number */ + usb_deregister_dev(intf, &auerswald_class); + + mutex_lock(&cp->mutex); + info ("device /dev/%s now disconnecting", cp->name); + + /* Stop the interrupt endpoint */ + auerswald_int_release (cp); + + /* remove the control chain allocated in auerswald_probe + This has the benefit of + a) all pending (a)synchronous urbs are unlinked + b) all buffers dealing with urbs are reclaimed + */ + auerchain_free (&cp->controlchain); + + if (cp->open_count == 0) { + /* nobody is using this device. So we can clean up now */ + mutex_unlock(&cp->mutex); + /* mutex_unlock() is possible here because no other task + can open the device (see above). I don't want + to kfree() a locked mutex. */ + + auerswald_delete (cp); + } else { + /* device is used. Remove the pointer to the + usb device (it's not valid any more). The last + release() will do the clean up */ + cp->usbdev = NULL; + mutex_unlock(&cp->mutex); + /* Terminate waiting writers */ + wake_up (&cp->bufferwait); + /* Inform all waiting readers */ + for ( u = 0; u < AUH_TYPESIZE; u++) { + pauerscon_t scp = cp->services[u]; + if (scp) + scp->disconnect( scp); + } + } +} + +/* Descriptor for the devices which are served by this driver. + NOTE: this struct is parsed by the usbmanager install scripts. + Don't change without caution! 
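+   All entries match on the Auerswald vendor ID combined with one specific product ID.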
+*/ +static struct usb_device_id auerswald_ids [] = { + { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */ + { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */ + { USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */ + { USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */ + { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */ + { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */ + { } /* Terminating entry */ +}; + +/* Standard module device table */ +MODULE_DEVICE_TABLE (usb, auerswald_ids); + +/* Standard usb driver struct */ +static struct usb_driver auerswald_driver = { + .name = "auerswald", + .probe = auerswald_probe, + .disconnect = auerswald_disconnect, + .id_table = auerswald_ids, +}; + + +/* --------------------------------------------------------------------- */ +/* Module loading/unloading */ + +/* Driver initialisation. Called after module loading. + NOTE: there is no concurrency at _init +*/ +static int __init auerswald_init (void) +{ + int result; + dbg ("init"); + + /* register driver at the USB subsystem */ + result = usb_register (&auerswald_driver); + if (result < 0) { + err ("driver could not be registered"); + return -1; + } + return 0; +} + +/* Driver deinit. Called before module removal. + NOTE: there is no concurrency at _cleanup +*/ +static void __exit auerswald_cleanup (void) +{ + dbg ("cleanup"); + usb_deregister (&auerswald_driver); +} + +/* --------------------------------------------------------------------- */ +/* Linux device driver module description */ + +MODULE_AUTHOR (DRIVER_AUTHOR); +MODULE_DESCRIPTION (DRIVER_DESC); +MODULE_LICENSE ("GPL"); + +module_init (auerswald_init); +module_exit (auerswald_cleanup); + +/* --------------------------------------------------------------------- */ + diff --git a/trunk/drivers/usb/musb/Kconfig b/trunk/drivers/usb/musb/Kconfig deleted file mode 100644 index faca4333f27a..000000000000 --- a/trunk/drivers/usb/musb/Kconfig +++ /dev/null @@ -1,176 +0,0 @@ -# -# USB Dual Role (OTG-ready) Controller Drivers -# for silicon based on Mentor Graphics INVENTRA designs -# - -comment "Enable Host or Gadget support to see Inventra options" - depends on !USB && USB_GADGET=n - -# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller -config USB_MUSB_HDRC - depends on (USB || USB_GADGET) && HAVE_CLK - select TWL4030_USB if MACH_OMAP_3430SDP - tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' - help - Say Y here if your system has a dual role high speed USB - controller based on the Mentor Graphics silicon IP. Then - configure options to match your silicon and the board - it's being used with, including the USB peripheral role, - or the USB host role, or both. - - Texas Instruments parts using this IP include DaVinci 644x, - OMAP 243x, OMAP 343x, and TUSB 6010. - - If you do not know what this is, please say N. - - To compile this driver as a module, choose M here; the - module will be called "musb_hdrc". - -config USB_MUSB_SOC - boolean - depends on USB_MUSB_HDRC - default y if ARCH_DAVINCI - default y if ARCH_OMAP2430 - default y if ARCH_OMAP34XX - help - Use a static file to describe how the - controller is configured (endpoints, mechanisms, etc) on the - current iteration of a given system-on-chip. 
- -comment "DaVinci 644x USB support" - depends on USB_MUSB_HDRC && ARCH_DAVINCI - -comment "OMAP 243x high speed USB support" - depends on USB_MUSB_HDRC && ARCH_OMAP2430 - -comment "OMAP 343x high speed USB support" - depends on USB_MUSB_HDRC && ARCH_OMAP34XX - -config USB_TUSB6010 - boolean "TUSB 6010 support" - depends on USB_MUSB_HDRC && !USB_MUSB_SOC - default y - help - The TUSB 6010 chip, from Texas Instruments, connects a discrete - HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ - (a high speed serial link). It can use system-specific external - DMA controllers. - -choice - prompt "Driver Mode" - depends on USB_MUSB_HDRC - help - Dual-Role devices can support both host and peripheral roles, - as well as a the special "OTG Device" role which can switch - between both roles as needed. - -# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support; -# OTG needs both roles, not just USB_MUSB_HOST. -config USB_MUSB_HOST - depends on USB - bool "USB Host" - help - Say Y here if your system supports the USB host role. - If it has a USB "A" (rectangular), "Mini-A" (uncommon), - or "Mini-AB" connector, it supports the host role. - (With a "Mini-AB" connector, you should enable USB OTG.) - -# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral -# side support ... OTG needs both roles -config USB_MUSB_PERIPHERAL - depends on USB_GADGET - bool "USB Peripheral (gadget stack)" - select USB_GADGET_MUSB_HDRC - help - Say Y here if your system supports the USB peripheral role. - If it has a USB "B" (squarish), "Mini-B", or "Mini-AB" - connector, it supports the peripheral role. - (With a "Mini-AB" connector, you should enable USB OTG.) - -config USB_MUSB_OTG - depends on USB && USB_GADGET && PM && EXPERIMENTAL - bool "Both host and peripheral: USB OTG (On The Go) Device" - select USB_GADGET_MUSB_HDRC - select USB_OTG - help - The most notable feature of USB OTG is support for a - "Dual-Role" device, which can act as either a device - or a host. The initial role choice can be changed - later, when two dual-role devices talk to each other. - - At this writing, the OTG support in this driver is incomplete, - omitting the mandatory HNP or SRP protocols. However, some - of the cable based role switching works. (That is, grounding - the ID pin switches the controller to host mode, while leaving - it floating leaves it in peripheral mode.) - - Select this if your system has a Mini-AB connector, or - to simplify certain kinds of configuration. - - To implement your OTG Targeted Peripherals List (TPL), enable - USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h" - to match your requirements. - -endchoice - -# enable peripheral support (including with OTG) -config USB_GADGET_MUSB_HDRC - bool - depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) -# default y -# select USB_GADGET_DUALSPEED -# select USB_GADGET_SELECTED - -# enables host support (including with OTG) -config USB_MUSB_HDRC_HCD - bool - depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG) - select USB_OTG if USB_GADGET_MUSB_HDRC - default y - - -config MUSB_PIO_ONLY - bool 'Disable DMA (always use PIO)' - depends on USB_MUSB_HDRC - default y if USB_TUSB6010 - help - All data is copied between memory and FIFO by the CPU. - DMA controllers are ignored. - - Do not select 'n' here unless DMA support for your SOC or board - is unavailable (or unstable). When DMA is enabled at compile time, - you can still disable it at run time using the "use_dma=n" module - parameter. 
- -config USB_INVENTRA_DMA - bool - depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY - default ARCH_OMAP2430 || ARCH_OMAP34XX - help - Enable DMA transfers using Mentor's engine. - -config USB_TI_CPPI_DMA - bool - depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY - default ARCH_DAVINCI - help - Enable DMA transfers when TI CPPI DMA is available. - -config USB_TUSB_OMAP_DMA - bool - depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY - depends on USB_TUSB6010 - depends on ARCH_OMAP - default y - help - Enable DMA transfers on TUSB 6010 when OMAP DMA is available. - -config USB_MUSB_LOGLEVEL - depends on USB_MUSB_HDRC - int 'Logging Level (0 - none / 3 - annoying / ... )' - default 0 - help - Set the logging level. 0 disables the debugging altogether, - although when USB_DEBUG is set the value is at least 1. - Starting at level 3, per-transfer (urb, usb_request, packet, - or dma transfer) tracing may kick in. diff --git a/trunk/drivers/usb/musb/Makefile b/trunk/drivers/usb/musb/Makefile deleted file mode 100644 index 88eb67de08ae..000000000000 --- a/trunk/drivers/usb/musb/Makefile +++ /dev/null @@ -1,86 +0,0 @@ -# -# for USB OTG silicon based on Mentor Graphics INVENTRA designs -# - -musb_hdrc-objs := musb_core.o - -obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o - -ifeq ($(CONFIG_ARCH_DAVINCI),y) - musb_hdrc-objs += davinci.o -endif - -ifeq ($(CONFIG_USB_TUSB6010),y) - musb_hdrc-objs += tusb6010.o -endif - -ifeq ($(CONFIG_ARCH_OMAP2430),y) - musb_hdrc-objs += omap2430.o -endif - -ifeq ($(CONFIG_ARCH_OMAP3430),y) - musb_hdrc-objs += omap2430.o -endif - -ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y) - musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o -endif - -ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y) - musb_hdrc-objs += musb_virthub.o musb_host.o -endif - -# the kconfig must guarantee that only one of the -# possible I/O schemes will be enabled at a time ... -# PIO only, or DMA (several potential schemes). -# though PIO is always there to back up DMA, and for ep0 - -ifneq ($(CONFIG_MUSB_PIO_ONLY),y) - - ifeq ($(CONFIG_USB_INVENTRA_DMA),y) - musb_hdrc-objs += musbhsdma.o - - else - ifeq ($(CONFIG_USB_TI_CPPI_DMA),y) - musb_hdrc-objs += cppi_dma.o - - else - ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y) - musb_hdrc-objs += tusb6010_omap.o - - endif - endif - endif -endif - - -################################################################################ - -# FIXME remove all these extra "-DMUSB_* things, stick to CONFIG_* - -ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y) - EXTRA_CFLAGS += -DMUSB_AHB_ID -endif - -# Debugging - -MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL) - -ifeq ("$(strip $(MUSB_DEBUG))","") - ifdef CONFIG_USB_DEBUG - MUSB_DEBUG:=1 - else - MUSB_DEBUG:=0 - endif -endif - -ifneq ($(MUSB_DEBUG),0) - EXTRA_CFLAGS += -DDEBUG - - ifeq ($(CONFIG_PROC_FS),y) - musb_hdrc-objs += musb_procfs.o - endif - -endif - -EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG) diff --git a/trunk/drivers/usb/musb/cppi_dma.c b/trunk/drivers/usb/musb/cppi_dma.c deleted file mode 100644 index 5ad6d0893cbe..000000000000 --- a/trunk/drivers/usb/musb/cppi_dma.c +++ /dev/null @@ -1,1540 +0,0 @@ -/* - * Copyright (C) 2005-2006 by Texas Instruments - * - * This file implements a DMA interface using TI's CPPI DMA. - * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB. - * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci. 
- */ - -#include - -#include "musb_core.h" -#include "cppi_dma.h" - - -/* CPPI DMA status 7-mar-2006: - * - * - See musb_{host,gadget}.c for more info - * - * - Correct RX DMA generally forces the engine into irq-per-packet mode, - * which can easily saturate the CPU under non-mass-storage loads. - * - * NOTES 24-aug-2006 (2.6.18-rc4): - * - * - peripheral RXDMA wedged in a test with packets of length 512/512/1. - * evidently after the 1 byte packet was received and acked, the queue - * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003, - * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401 - * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx - * of its next (512 byte) packet. IRQ issues? - * - * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will - * evidently also directly update the RX and TX CSRs ... so audit all - * host and peripheral side DMA code to avoid CSR access after DMA has - * been started. - */ - -/* REVISIT now we can avoid preallocating these descriptors; or - * more simply, switch to a global freelist not per-channel ones. - * Note: at full speed, 64 descriptors == 4K bulk data. - */ -#define NUM_TXCHAN_BD 64 -#define NUM_RXCHAN_BD 64 - -static inline void cpu_drain_writebuffer(void) -{ - wmb(); -#ifdef CONFIG_CPU_ARM926T - /* REVISIT this "should not be needed", - * but lack of it sure seemed to hurt ... - */ - asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n"); -#endif -} - -static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) -{ - struct cppi_descriptor *bd = c->freelist; - - if (bd) - c->freelist = bd->next; - return bd; -} - -static inline void -cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) -{ - if (!bd) - return; - bd->next = c->freelist; - c->freelist = bd; -} - -/* - * Start DMA controller - * - * Initialize the DMA controller as necessary. 
- */ - -/* zero out entire rx state RAM entry for the channel */ -static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx) -{ - musb_writel(&rx->rx_skipbytes, 0, 0); - musb_writel(&rx->rx_head, 0, 0); - musb_writel(&rx->rx_sop, 0, 0); - musb_writel(&rx->rx_current, 0, 0); - musb_writel(&rx->rx_buf_current, 0, 0); - musb_writel(&rx->rx_len_len, 0, 0); - musb_writel(&rx->rx_cnt_cnt, 0, 0); -} - -/* zero out entire tx state RAM entry for the channel */ -static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr) -{ - musb_writel(&tx->tx_head, 0, 0); - musb_writel(&tx->tx_buf, 0, 0); - musb_writel(&tx->tx_current, 0, 0); - musb_writel(&tx->tx_buf_current, 0, 0); - musb_writel(&tx->tx_info, 0, 0); - musb_writel(&tx->tx_rem_len, 0, 0); - /* musb_writel(&tx->tx_dummy, 0, 0); */ - musb_writel(&tx->tx_complete, 0, ptr); -} - -static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) -{ - int j; - - /* initialize channel fields */ - c->head = NULL; - c->tail = NULL; - c->last_processed = NULL; - c->channel.status = MUSB_DMA_STATUS_UNKNOWN; - c->controller = cppi; - c->is_rndis = 0; - c->freelist = NULL; - - /* build the BD Free list for the channel */ - for (j = 0; j < NUM_TXCHAN_BD + 1; j++) { - struct cppi_descriptor *bd; - dma_addr_t dma; - - bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); - bd->dma = dma; - cppi_bd_free(c, bd); - } -} - -static int cppi_channel_abort(struct dma_channel *); - -static void cppi_pool_free(struct cppi_channel *c) -{ - struct cppi *cppi = c->controller; - struct cppi_descriptor *bd; - - (void) cppi_channel_abort(&c->channel); - c->channel.status = MUSB_DMA_STATUS_UNKNOWN; - c->controller = NULL; - - /* free all its bds */ - bd = c->last_processed; - do { - if (bd) - dma_pool_free(cppi->pool, bd, bd->dma); - bd = cppi_bd_alloc(c); - } while (bd); - c->last_processed = NULL; -} - -static int __init cppi_controller_start(struct dma_controller *c) -{ - struct cppi *controller; - void __iomem *tibase; - int i; - - controller = container_of(c, struct cppi, controller); - - /* do whatever is necessary to start controller */ - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { - controller->tx[i].transmit = true; - controller->tx[i].index = i; - } - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { - controller->rx[i].transmit = false; - controller->rx[i].index = i; - } - - /* setup BD list on a per channel basis */ - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) - cppi_pool_init(controller, controller->tx + i); - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) - cppi_pool_init(controller, controller->rx + i); - - tibase = controller->tibase; - INIT_LIST_HEAD(&controller->tx_complete); - - /* initialise tx/rx channel head pointers to zero */ - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { - struct cppi_channel *tx_ch = controller->tx + i; - struct cppi_tx_stateram __iomem *tx; - - INIT_LIST_HEAD(&tx_ch->tx_complete); - - tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); - tx_ch->state_ram = tx; - cppi_reset_tx(tx, 0); - } - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { - struct cppi_channel *rx_ch = controller->rx + i; - struct cppi_rx_stateram __iomem *rx; - - INIT_LIST_HEAD(&rx_ch->tx_complete); - - rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i); - rx_ch->state_ram = rx; - cppi_reset_rx(rx); - } - - /* enable individual cppi channels */ - musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - - /* enable 
tx/rx CPPI control */ - musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); - musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); - - /* disable RNDIS mode, also host rx RNDIS autorequest */ - musb_writel(tibase, DAVINCI_RNDIS_REG, 0); - musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0); - - return 0; -} - -/* - * Stop DMA controller - * - * De-Init the DMA controller as necessary. - */ - -static int cppi_controller_stop(struct dma_controller *c) -{ - struct cppi *controller; - void __iomem *tibase; - int i; - - controller = container_of(c, struct cppi, controller); - - tibase = controller->tibase; - /* DISABLE INDIVIDUAL CHANNEL Interrupts */ - musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, - DAVINCI_DMA_ALL_CHANNELS_ENABLE); - - DBG(1, "Tearing down RX and TX Channels\n"); - for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { - /* FIXME restructure of txdma to use bds like rxdma */ - controller->tx[i].last_processed = NULL; - cppi_pool_free(controller->tx + i); - } - for (i = 0; i < ARRAY_SIZE(controller->rx); i++) - cppi_pool_free(controller->rx + i); - - /* in Tx Case proper teardown is supported. We resort to disabling - * Tx/Rx CPPI after cleanup of Tx channels. Before TX teardown is - * complete TX CPPI cannot be disabled. - */ - /*disable tx/rx cppi */ - musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); - musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); - - return 0; -} - -/* While dma channel is allocated, we only want the core irqs active - * for fault reports, otherwise we'd get irqs that we don't care about. - * Except for TX irqs, where dma done != fifo empty and reusable ... - * - * NOTE: docs don't say either way, but irq masking **enables** irqs. - * - * REVISIT same issue applies to pure PIO usage too, and non-cppi dma... - */ -static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum) -{ - musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8)); -} - -static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum) -{ - musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8)); -} - - -/* - * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to - * each transfer direction of a non-control endpoint, so allocating - * (and deallocating) is mostly a way to notice bad housekeeping on - * the software side. We assume the irqs are always active. - */ -static struct dma_channel * -cppi_channel_allocate(struct dma_controller *c, - struct musb_hw_ep *ep, u8 transmit) -{ - struct cppi *controller; - u8 index; - struct cppi_channel *cppi_ch; - void __iomem *tibase; - - controller = container_of(c, struct cppi, controller); - tibase = controller->tibase; - - /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ - index = ep->epnum - 1; - - /* return the corresponding CPPI Channel Handle, and - * probably disable the non-CPPI irq until we need it. 
- */ - if (transmit) { - if (index >= ARRAY_SIZE(controller->tx)) { - DBG(1, "no %cX%d CPPI channel\n", 'T', index); - return NULL; - } - cppi_ch = controller->tx + index; - } else { - if (index >= ARRAY_SIZE(controller->rx)) { - DBG(1, "no %cX%d CPPI channel\n", 'R', index); - return NULL; - } - cppi_ch = controller->rx + index; - core_rxirq_disable(tibase, ep->epnum); - } - - /* REVISIT make this an error later once the same driver code works - * with the other DMA engine too - */ - if (cppi_ch->hw_ep) - DBG(1, "re-allocating DMA%d %cX channel %p\n", - index, transmit ? 'T' : 'R', cppi_ch); - cppi_ch->hw_ep = ep; - cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; - - DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); - return &cppi_ch->channel; -} - -/* Release a CPPI Channel. */ -static void cppi_channel_release(struct dma_channel *channel) -{ - struct cppi_channel *c; - void __iomem *tibase; - - /* REVISIT: for paranoia, check state and abort if needed... */ - - c = container_of(channel, struct cppi_channel, channel); - tibase = c->controller->tibase; - if (!c->hw_ep) - DBG(1, "releasing idle DMA channel %p\n", c); - else if (!c->transmit) - core_rxirq_enable(tibase, c->index + 1); - - /* for now, leave its cppi IRQ enabled (we won't trigger it) */ - c->hw_ep = NULL; - channel->status = MUSB_DMA_STATUS_UNKNOWN; -} - -/* Context: controller irqlocked */ -static void -cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) -{ - void __iomem *base = c->controller->mregs; - struct cppi_rx_stateram __iomem *rx = c->state_ram; - - musb_ep_select(base, c->index + 1); - - DBG(level, "RX DMA%d%s: %d left, csr %04x, " - "%08x H%08x S%08x C%08x, " - "B%08x L%08x %08x .. %08x" - "\n", - c->index, tag, - musb_readl(c->controller->tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), - musb_readw(c->hw_ep->regs, MUSB_RXCSR), - - musb_readl(&rx->rx_skipbytes, 0), - musb_readl(&rx->rx_head, 0), - musb_readl(&rx->rx_sop, 0), - musb_readl(&rx->rx_current, 0), - - musb_readl(&rx->rx_buf_current, 0), - musb_readl(&rx->rx_len_len, 0), - musb_readl(&rx->rx_cnt_cnt, 0), - musb_readl(&rx->rx_complete, 0) - ); -} - -/* Context: controller irqlocked */ -static void -cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) -{ - void __iomem *base = c->controller->mregs; - struct cppi_tx_stateram __iomem *tx = c->state_ram; - - musb_ep_select(base, c->index + 1); - - DBG(level, "TX DMA%d%s: csr %04x, " - "H%08x S%08x C%08x %08x, " - "F%08x L%08x .. 
%08x" - "\n", - c->index, tag, - musb_readw(c->hw_ep->regs, MUSB_TXCSR), - - musb_readl(&tx->tx_head, 0), - musb_readl(&tx->tx_buf, 0), - musb_readl(&tx->tx_current, 0), - musb_readl(&tx->tx_buf_current, 0), - - musb_readl(&tx->tx_info, 0), - musb_readl(&tx->tx_rem_len, 0), - /* dummy/unused word 6 */ - musb_readl(&tx->tx_complete, 0) - ); -} - -/* Context: controller irqlocked */ -static inline void -cppi_rndis_update(struct cppi_channel *c, int is_rx, - void __iomem *tibase, int is_rndis) -{ - /* we may need to change the rndis flag for this cppi channel */ - if (c->is_rndis != is_rndis) { - u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG); - u32 temp = 1 << (c->index); - - if (is_rx) - temp <<= 16; - if (is_rndis) - value |= temp; - else - value &= ~temp; - musb_writel(tibase, DAVINCI_RNDIS_REG, value); - c->is_rndis = is_rndis; - } -} - -static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) -{ - pr_debug("RXBD/%s %08x: " - "nxt %08x buf %08x off.blen %08x opt.plen %08x\n", - tag, bd->dma, - bd->hw_next, bd->hw_bufp, bd->hw_off_len, - bd->hw_options); -} - -static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) -{ -#if MUSB_DEBUG > 0 - struct cppi_descriptor *bd; - - if (!_dbg_level(level)) - return; - cppi_dump_rx(level, rx, tag); - if (rx->last_processed) - cppi_dump_rxbd("last", rx->last_processed); - for (bd = rx->head; bd; bd = bd->next) - cppi_dump_rxbd("active", bd); -#endif -} - - -/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX; - * so we won't ever use it (see "CPPI RX Woes" below). - */ -static inline int cppi_autoreq_update(struct cppi_channel *rx, - void __iomem *tibase, int onepacket, unsigned n_bds) -{ - u32 val; - -#ifdef RNDIS_RX_IS_USABLE - u32 tmp; - /* assert(is_host_active(musb)) */ - - /* start from "AutoReq never" */ - tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); - val = tmp & ~((0x3) << (rx->index * 2)); - - /* HCD arranged reqpkt for packet #1. we arrange int - * for all but the last one, maybe in two segments. - */ - if (!onepacket) { -#if 0 - /* use two segments, autoreq "all" then the last "never" */ - val |= ((0x3) << (rx->index * 2)); - n_bds--; -#else - /* one segment, autoreq "all-but-last" */ - val |= ((0x1) << (rx->index * 2)); -#endif - } - - if (val != tmp) { - int n = 100; - - /* make sure that autoreq is updated before continuing */ - musb_writel(tibase, DAVINCI_AUTOREQ_REG, val); - do { - tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); - if (tmp == val) - break; - cpu_relax(); - } while (n-- > 0); - } -#endif - - /* REQPKT is turned off after each segment */ - if (n_bds && rx->channel.actual_len) { - void __iomem *regs = rx->hw_ep->regs; - - val = musb_readw(regs, MUSB_RXCSR); - if (!(val & MUSB_RXCSR_H_REQPKT)) { - val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS; - musb_writew(regs, MUSB_RXCSR, val); - /* flush writebufer */ - val = musb_readw(regs, MUSB_RXCSR); - } - } - return n_bds; -} - - -/* Buffer enqueuing Logic: - * - * - RX builds new queues each time, to help handle routine "early - * termination" cases (faults, including errors and short reads) - * more correctly. - * - * - for now, TX reuses the same queue of BDs every time - * - * REVISIT long term, we want a normal dynamic model. - * ... the goal will be to append to the - * existing queue, processing completed "dma buffers" (segments) on the fly. - * - * Otherwise we force an IRQ latency between requests, which slows us a lot - * (especially in "transparent" dma). 
Unfortunately that model seems to be - * inherent in the DMA model from the Mentor code, except in the rare case - * of transfers big enough (~128+ KB) that we could append "middle" segments - * in the TX paths. (RX can't do this, see below.) - * - * That's true even in the CPPI- friendly iso case, where most urbs have - * several small segments provided in a group and where the "packet at a time" - * "transparent" DMA model is always correct, even on the RX side. - */ - -/* - * CPPI TX: - * ======== - * TX is a lot more reasonable than RX; it doesn't need to run in - * irq-per-packet mode very often. RNDIS mode seems to behave too - * (except how it handles the exactly-N-packets case). Building a - * txdma queue with multiple requests (urb or usb_request) looks - * like it would work ... but fault handling would need much testing. - * - * The main issue with TX mode RNDIS relates to transfer lengths that - * are an exact multiple of the packet length. It appears that there's - * a hiccup in that case (maybe the DMA completes before the ZLP gets - * written?) boiling down to not being able to rely on CPPI writing any - * terminating zero length packet before the next transfer is written. - * So that's punted to PIO; better yet, gadget drivers can avoid it. - * - * Plus, there's allegedly an undocumented constraint that rndis transfer - * length be a multiple of 64 bytes ... but the chip doesn't act that - * way, and we really don't _want_ that behavior anyway. - * - * On TX, "transparent" mode works ... although experiments have shown - * problems trying to use the SOP/EOP bits in different USB packets. - * - * REVISIT try to handle terminating zero length packets using CPPI - * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet - * links avoid that issue by forcing them to avoid zlps.) - */ -static void -cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) -{ - unsigned maxpacket = tx->maxpacket; - dma_addr_t addr = tx->buf_dma + tx->offset; - size_t length = tx->buf_len - tx->offset; - struct cppi_descriptor *bd; - unsigned n_bds; - unsigned i; - struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram; - int rndis; - - /* TX can use the CPPI "rndis" mode, where we can probably fit this - * transfer in one BD and one IRQ. The only time we would NOT want - * to use it is when hardware constraints prevent it, or if we'd - * trigger the "send a ZLP?" confusion. - */ - rndis = (maxpacket & 0x3f) == 0 - && length < 0xffff - && (length % maxpacket) != 0; - - if (rndis) { - maxpacket = length; - n_bds = 1; - } else { - n_bds = length / maxpacket; - if (!length || (length % maxpacket)) - n_bds++; - n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD); - length = min(n_bds * maxpacket, length); - } - - DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", - tx->index, - maxpacket, - rndis ? "rndis" : "transparent", - n_bds, - addr, length); - - cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); - - /* assuming here that channel_program is called during - * transfer initiation ... current code maintains state - * for one outstanding request only (no queues, not even - * the implicit ones of an iso urb). - */ - - bd = tx->freelist; - tx->head = bd; - tx->last_processed = NULL; - - /* FIXME use BD pool like RX side does, and just queue - * the minimum number for this request. - */ - - /* Prepare queue of BDs first, then hand it to hardware. - * All BDs except maybe the last should be of full packet - * size; for RNDIS there _is_ only that last packet. 
- */ - for (i = 0; i < n_bds; ) { - if (++i < n_bds && bd->next) - bd->hw_next = bd->next->dma; - else - bd->hw_next = 0; - - bd->hw_bufp = tx->buf_dma + tx->offset; - - /* FIXME set EOP only on the last packet, - * SOP only on the first ... avoid IRQs - */ - if ((tx->offset + maxpacket) <= tx->buf_len) { - tx->offset += maxpacket; - bd->hw_off_len = maxpacket; - bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET - | CPPI_OWN_SET | maxpacket; - } else { - /* only this one may be a partial USB Packet */ - u32 partial_len; - - partial_len = tx->buf_len - tx->offset; - tx->offset = tx->buf_len; - bd->hw_off_len = partial_len; - - bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET - | CPPI_OWN_SET | partial_len; - if (partial_len == 0) - bd->hw_options |= CPPI_ZERO_SET; - } - - DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n", - bd, bd->hw_next, bd->hw_bufp, - bd->hw_off_len, bd->hw_options); - - /* update the last BD enqueued to the list */ - tx->tail = bd; - bd = bd->next; - } - - /* BDs live in DMA-coherent memory, but writes might be pending */ - cpu_drain_writebuffer(); - - /* Write to the HeadPtr in state RAM to trigger */ - musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma); - - cppi_dump_tx(5, tx, "/S"); -} - -/* - * CPPI RX Woes: - * ============= - * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte - * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back. - * (Full speed transfers have similar scenarios.) - * - * The correct behavior for Linux is that (a) fills the buffer with 300 bytes, - * and the next packet goes into a buffer that's queued later; while (b) fills - * the buffer with 1024 bytes. How to do that with CPPI? - * - * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but - * (b) loses **BADLY** because nothing (!) happens when that second packet - * fills the buffer, much less when a third one arrives. (Which makes this - * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination - * is optional, and it's fine if peripherals -- not hosts! -- pad messages - * out to end-of-buffer. Standard PCI host controller DMA descriptors - * implement that mode by default ... which is no accident.) - * - * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have - * converse problems: (b) is handled right, but (a) loses badly. CPPI RX - * ignores SOP/EOP markings and processes both of those BDs; so both packets - * are loaded into the buffer (with a 212 byte gap between them), and the next - * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP - * are intended as outputs for RX queues, not inputs...) - * - * - A variant of "transparent" mode -- one BD at a time -- is the only way to - * reliably make both cases work, with software handling both cases correctly - * and at the significant penalty of needing an IRQ per packet. (The lack of - * I/O overlap can be slightly ameliorated by enabling double buffering.) - * - * So how to get rid of IRQ-per-packet? The transparent multi-BD case could - * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK - * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors - * with guaranteed driver level fault recovery and scrubbing out what's left - * of that garbaged datastream. - * - * But there seems to be no way to identify the cases where CPPI RNDIS mode - * is appropriate -- which do NOT include RNDIS host drivers, but do include - * the CDC Ethernet driver! 
-- and the documentation is incomplete/wrong. - * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic - * that applies best on the peripheral side (and which could fail rudely). - * - * Leaving only "transparent" mode; we avoid multi-bd modes in almost all - * cases other than mass storage class. Otherwise we're correct but slow, - * since CPPI penalizes our need for a "true RNDIS" default mode. - */ - - -/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY - * - * IFF - * (a) peripheral mode ... since rndis peripherals could pad their - * writes to hosts, causing i/o failure; or we'd have to cope with - * a largely unknowable variety of host side protocol variants - * (b) and short reads are NOT errors ... since full reads would - * cause those same i/o failures - * (c) and read length is - * - less than 64KB (max per cppi descriptor) - * - not a multiple of 4096 (g_zero default, full reads typical) - * - N (>1) packets long, ditto (full reads not EXPECTED) - * THEN - * try rx rndis mode - * - * Cost of heuristic failing: RXDMA wedges at the end of transfers that - * fill out the whole buffer. Buggy host side usb network drivers could - * trigger that, but "in the field" such bugs seem to be all but unknown. - * - * So this module parameter lets the heuristic be disabled. When using - * gadgetfs, the heuristic will probably need to be disabled. - */ -static int cppi_rx_rndis = 1; - -module_param(cppi_rx_rndis, bool, 0); -MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic"); - - -/** - * cppi_next_rx_segment - dma read for the next chunk of a buffer - * @musb: the controller - * @rx: dma channel - * @onepacket: true unless caller treats short reads as errors, and - * performs fault recovery above usbcore. - * Context: controller irqlocked - * - * See above notes about why we can't use multi-BD RX queues except in - * rare cases (mass storage class), and can never use the hardware "rndis" - * mode (since it's not a "true" RNDIS mode) with complete safety.. - * - * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in - * code to recover from corrupted datastreams after each short transfer. - */ -static void -cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) -{ - unsigned maxpacket = rx->maxpacket; - dma_addr_t addr = rx->buf_dma + rx->offset; - size_t length = rx->buf_len - rx->offset; - struct cppi_descriptor *bd, *tail; - unsigned n_bds; - unsigned i; - void __iomem *tibase = musb->ctrl_base; - int is_rndis = 0; - struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram; - - if (onepacket) { - /* almost every USB driver, host or peripheral side */ - n_bds = 1; - - /* maybe apply the heuristic above */ - if (cppi_rx_rndis - && is_peripheral_active(musb) - && length > maxpacket - && (length & ~0xffff) == 0 - && (length & 0x0fff) != 0 - && (length & (maxpacket - 1)) == 0) { - maxpacket = length; - is_rndis = 1; - } - } else { - /* virtually nothing except mass storage class */ - if (length > 0xffff) { - n_bds = 0xffff / maxpacket; - length = n_bds * maxpacket; - } else { - n_bds = length / maxpacket; - if (length % maxpacket) - n_bds++; - } - if (n_bds == 1) - onepacket = 1; - else - n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD); - } - - /* In host mode, autorequest logic can generate some IN tokens; it's - * tricky since we can't leave REQPKT set in RXCSR after the transfer - * finishes. So: multipacket transfers involve two or more segments. - * And always at least two IRQs ... 
RNDIS mode is not an option. - */ - if (is_host_active(musb)) - n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds); - - cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis); - - length = min(n_bds * maxpacket, length); - - DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " - "dma 0x%x len %u %u/%u\n", - rx->index, maxpacket, - onepacket - ? (is_rndis ? "rndis" : "onepacket") - : "multipacket", - n_bds, - musb_readl(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) - & 0xffff, - addr, length, rx->channel.actual_len, rx->buf_len); - - /* only queue one segment at a time, since the hardware prevents - * correct queue shutdown after unexpected short packets - */ - bd = cppi_bd_alloc(rx); - rx->head = bd; - - /* Build BDs for all packets in this segment */ - for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { - u32 bd_len; - - if (i) { - bd = cppi_bd_alloc(rx); - if (!bd) - break; - tail->next = bd; - tail->hw_next = bd->dma; - } - bd->hw_next = 0; - - /* all but the last packet will be maxpacket size */ - if (maxpacket < length) - bd_len = maxpacket; - else - bd_len = length; - - bd->hw_bufp = addr; - addr += bd_len; - rx->offset += bd_len; - - bd->hw_off_len = (0 /*offset*/ << 16) + bd_len; - bd->buflen = bd_len; - - bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0); - length -= bd_len; - } - - /* we always expect at least one reusable BD! */ - if (!tail) { - WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds); - return; - } else if (i < n_bds) - WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds); - - tail->next = NULL; - tail->hw_next = 0; - - bd = rx->head; - rx->tail = tail; - - /* short reads and other faults should terminate this entire - * dma segment. we want one "dma packet" per dma segment, not - * one per USB packet, terminating the whole queue at once... - * NOTE that current hardware seems to ignore SOP and EOP. - */ - bd->hw_options |= CPPI_SOP_SET; - tail->hw_options |= CPPI_EOP_SET; - - if (debug >= 5) { - struct cppi_descriptor *d; - - for (d = rx->head; d; d = d->next) - cppi_dump_rxbd("S", d); - } - - /* in case the preceding transfer left some state... */ - tail = rx->last_processed; - if (tail) { - tail->next = bd; - tail->hw_next = bd->dma; - } - - core_rxirq_enable(tibase, rx->index + 1); - - /* BDs live in DMA-coherent memory, but writes might be pending */ - cpu_drain_writebuffer(); - - /* REVISIT specs say to write this AFTER the BUFCNT register - * below ... but that loses badly. - */ - musb_writel(&rx_ram->rx_head, 0, bd->dma); - - /* bufferCount must be at least 3, and zeroes on completion - * unless it underflows below zero, or stops at two, or keeps - * growing ... grr. 
- */ - i = musb_readl(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) - & 0xffff; - - if (!i) - musb_writel(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), - n_bds + 2); - else if (n_bds > (i - 3)) - musb_writel(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), - n_bds - (i - 3)); - - i = musb_readl(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) - & 0xffff; - if (i < (2 + n_bds)) { - DBG(2, "bufcnt%d underrun - %d (for %d)\n", - rx->index, i, n_bds); - musb_writel(tibase, - DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), - n_bds + 2); - } - - cppi_dump_rx(4, rx, "/S"); -} - -/** - * cppi_channel_program - program channel for data transfer - * @ch: the channel - * @maxpacket: max packet size - * @mode: For RX, 1 unless the usb protocol driver promised to treat - * all short reads as errors and kick in high level fault recovery. - * For TX, ignored because of RNDIS mode races/glitches. - * @dma_addr: dma address of buffer - * @len: length of buffer - * Context: controller irqlocked - */ -static int cppi_channel_program(struct dma_channel *ch, - u16 maxpacket, u8 mode, - dma_addr_t dma_addr, u32 len) -{ - struct cppi_channel *cppi_ch; - struct cppi *controller; - struct musb *musb; - - cppi_ch = container_of(ch, struct cppi_channel, channel); - controller = cppi_ch->controller; - musb = controller->musb; - - switch (ch->status) { - case MUSB_DMA_STATUS_BUS_ABORT: - case MUSB_DMA_STATUS_CORE_ABORT: - /* fault irq handler should have handled cleanup */ - WARNING("%cX DMA%d not cleaned up after abort!\n", - cppi_ch->transmit ? 'T' : 'R', - cppi_ch->index); - /* WARN_ON(1); */ - break; - case MUSB_DMA_STATUS_BUSY: - WARNING("program active channel? %cX DMA%d\n", - cppi_ch->transmit ? 'T' : 'R', - cppi_ch->index); - /* WARN_ON(1); */ - break; - case MUSB_DMA_STATUS_UNKNOWN: - DBG(1, "%cX DMA%d not allocated!\n", - cppi_ch->transmit ? 'T' : 'R', - cppi_ch->index); - /* FALLTHROUGH */ - case MUSB_DMA_STATUS_FREE: - break; - } - - ch->status = MUSB_DMA_STATUS_BUSY; - - /* set transfer parameters, then queue up its first segment */ - cppi_ch->buf_dma = dma_addr; - cppi_ch->offset = 0; - cppi_ch->maxpacket = maxpacket; - cppi_ch->buf_len = len; - - /* TX channel? or RX? */ - if (cppi_ch->transmit) - cppi_next_tx_segment(musb, cppi_ch); - else - cppi_next_rx_segment(musb, cppi_ch, mode); - - return true; -} - -static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) -{ - struct cppi_channel *rx = &cppi->rx[ch]; - struct cppi_rx_stateram __iomem *state = rx->state_ram; - struct cppi_descriptor *bd; - struct cppi_descriptor *last = rx->last_processed; - bool completed = false; - bool acked = false; - int i; - dma_addr_t safe2ack; - void __iomem *regs = rx->hw_ep->regs; - - cppi_dump_rx(6, rx, "/K"); - - bd = last ? 
last->next : rx->head; - if (!bd) - return false; - - /* run through all completed BDs */ - for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0); - (safe2ack || completed) && bd && i < NUM_RXCHAN_BD; - i++, bd = bd->next) { - u16 len; - - /* catch latest BD writes from CPPI */ - rmb(); - if (!completed && (bd->hw_options & CPPI_OWN_SET)) - break; - - DBG(5, "C/RXBD %08x: nxt %08x buf %08x " - "off.len %08x opt.len %08x (%d)\n", - bd->dma, bd->hw_next, bd->hw_bufp, - bd->hw_off_len, bd->hw_options, - rx->channel.actual_len); - - /* actual packet received length */ - if ((bd->hw_options & CPPI_SOP_SET) && !completed) - len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK; - else - len = 0; - - if (bd->hw_options & CPPI_EOQ_MASK) - completed = true; - - if (!completed && len < bd->buflen) { - /* NOTE: when we get a short packet, RXCSR_H_REQPKT - * must have been cleared, and no more DMA packets may - * active be in the queue... TI docs didn't say, but - * CPPI ignores those BDs even though OWN is still set. - */ - completed = true; - DBG(3, "rx short %d/%d (%d)\n", - len, bd->buflen, - rx->channel.actual_len); - } - - /* If we got here, we expect to ack at least one BD; meanwhile - * CPPI may completing other BDs while we scan this list... - * - * RACE: we can notice OWN cleared before CPPI raises the - * matching irq by writing that BD as the completion pointer. - * In such cases, stop scanning and wait for the irq, avoiding - * lost acks and states where BD ownership is unclear. - */ - if (bd->dma == safe2ack) { - musb_writel(&state->rx_complete, 0, safe2ack); - safe2ack = musb_readl(&state->rx_complete, 0); - acked = true; - if (bd->dma == safe2ack) - safe2ack = 0; - } - - rx->channel.actual_len += len; - - cppi_bd_free(rx, last); - last = bd; - - /* stop scanning on end-of-segment */ - if (bd->hw_next == 0) - completed = true; - } - rx->last_processed = last; - - /* dma abort, lost ack, or ... */ - if (!acked && last) { - int csr; - - if (safe2ack == 0 || safe2ack == rx->last_processed->dma) - musb_writel(&state->rx_complete, 0, safe2ack); - if (safe2ack == 0) { - cppi_bd_free(rx, last); - rx->last_processed = NULL; - - /* if we land here on the host side, H_REQPKT will - * be clear and we need to restart the queue... - */ - WARN_ON(rx->head); - } - musb_ep_select(cppi->mregs, rx->index + 1); - csr = musb_readw(regs, MUSB_RXCSR); - if (csr & MUSB_RXCSR_DMAENAB) { - DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", - rx->index, - rx->head, rx->tail, - rx->last_processed - ? rx->last_processed->dma - : 0, - completed ? ", completed" : "", - csr); - cppi_dump_rxq(4, "/what?", rx); - } - } - if (!completed) { - int csr; - - rx->head = bd; - - /* REVISIT seems like "autoreq all but EOP" doesn't... - * setting it here "should" be racey, but seems to work - */ - csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); - if (is_host_active(cppi->musb) - && bd - && !(csr & MUSB_RXCSR_H_REQPKT)) { - csr |= MUSB_RXCSR_H_REQPKT; - musb_writew(regs, MUSB_RXCSR, - MUSB_RXCSR_H_WZC_BITS | csr); - csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); - } - } else { - rx->head = NULL; - rx->tail = NULL; - } - - cppi_dump_rx(6, rx, completed ? 
"/completed" : "/cleaned"); - return completed; -} - -void cppi_completion(struct musb *musb, u32 rx, u32 tx) -{ - void __iomem *tibase; - int i, index; - struct cppi *cppi; - struct musb_hw_ep *hw_ep = NULL; - - cppi = container_of(musb->dma_controller, struct cppi, controller); - - tibase = musb->ctrl_base; - - /* process TX channels */ - for (index = 0; tx; tx = tx >> 1, index++) { - struct cppi_channel *tx_ch; - struct cppi_tx_stateram __iomem *tx_ram; - bool completed = false; - struct cppi_descriptor *bd; - - if (!(tx & 1)) - continue; - - tx_ch = cppi->tx + index; - tx_ram = tx_ch->state_ram; - - /* FIXME need a cppi_tx_scan() routine, which - * can also be called from abort code - */ - - cppi_dump_tx(5, tx_ch, "/E"); - - bd = tx_ch->head; - - if (NULL == bd) { - DBG(1, "null BD\n"); - continue; - } - - /* run through all completed BDs */ - for (i = 0; !completed && bd && i < NUM_TXCHAN_BD; - i++, bd = bd->next) { - u16 len; - - /* catch latest BD writes from CPPI */ - rmb(); - if (bd->hw_options & CPPI_OWN_SET) - break; - - DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n", - bd, bd->hw_next, bd->hw_bufp, - bd->hw_off_len, bd->hw_options); - - len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK; - tx_ch->channel.actual_len += len; - - tx_ch->last_processed = bd; - - /* write completion register to acknowledge - * processing of completed BDs, and possibly - * release the IRQ; EOQ might not be set ... - * - * REVISIT use the same ack strategy as rx - * - * REVISIT have observed bit 18 set; huh?? - */ - /* if ((bd->hw_options & CPPI_EOQ_MASK)) */ - musb_writel(&tx_ram->tx_complete, 0, bd->dma); - - /* stop scanning on end-of-segment */ - if (bd->hw_next == 0) - completed = true; - } - - /* on end of segment, maybe go to next one */ - if (completed) { - /* cppi_dump_tx(4, tx_ch, "/complete"); */ - - /* transfer more, or report completion */ - if (tx_ch->offset >= tx_ch->buf_len) { - tx_ch->head = NULL; - tx_ch->tail = NULL; - tx_ch->channel.status = MUSB_DMA_STATUS_FREE; - - hw_ep = tx_ch->hw_ep; - - /* Peripheral role never repurposes the - * endpoint, so immediate completion is - * safe. Host role waits for the fifo - * to empty (TXPKTRDY irq) before going - * to the next queued bulk transfer. - */ - if (is_host_active(cppi->musb)) { -#if 0 - /* WORKAROUND because we may - * not always get TXKPTRDY ... - */ - int csr; - - csr = musb_readw(hw_ep->regs, - MUSB_TXCSR); - if (csr & MUSB_TXCSR_TXPKTRDY) -#endif - completed = false; - } - if (completed) - musb_dma_completion(musb, index + 1, 1); - - } else { - /* Bigger transfer than we could fit in - * that first batch of descriptors... - */ - cppi_next_tx_segment(musb, tx_ch); - } - } else - tx_ch->head = bd; - } - - /* Start processing the RX block */ - for (index = 0; rx; rx = rx >> 1, index++) { - - if (rx & 1) { - struct cppi_channel *rx_ch; - - rx_ch = cppi->rx + index; - - /* let incomplete dma segments finish */ - if (!cppi_rx_scan(cppi, index)) - continue; - - /* start another dma segment if needed */ - if (rx_ch->channel.actual_len != rx_ch->buf_len - && rx_ch->channel.actual_len - == rx_ch->offset) { - cppi_next_rx_segment(musb, rx_ch, 1); - continue; - } - - /* all segments completed! */ - rx_ch->channel.status = MUSB_DMA_STATUS_FREE; - - hw_ep = rx_ch->hw_ep; - - core_rxirq_disable(tibase, index + 1); - musb_dma_completion(musb, index + 1, 0); - } - } - - /* write to CPPI EOI register to re-enable interrupts */ - musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); -} - -/* Instantiate a software object representing a DMA controller. 
*/ -struct dma_controller *__init -dma_controller_create(struct musb *musb, void __iomem *mregs) -{ - struct cppi *controller; - - controller = kzalloc(sizeof *controller, GFP_KERNEL); - if (!controller) - return NULL; - - controller->mregs = mregs; - controller->tibase = mregs - DAVINCI_BASE_OFFSET; - - controller->musb = musb; - controller->controller.start = cppi_controller_start; - controller->controller.stop = cppi_controller_stop; - controller->controller.channel_alloc = cppi_channel_allocate; - controller->controller.channel_release = cppi_channel_release; - controller->controller.channel_program = cppi_channel_program; - controller->controller.channel_abort = cppi_channel_abort; - - /* NOTE: allocating from on-chip SRAM would give the least - * contention for memory access, if that ever matters here. - */ - - /* setup BufferPool */ - controller->pool = dma_pool_create("cppi", - controller->musb->controller, - sizeof(struct cppi_descriptor), - CPPI_DESCRIPTOR_ALIGN, 0); - if (!controller->pool) { - kfree(controller); - return NULL; - } - - return &controller->controller; -} - -/* - * Destroy a previously-instantiated DMA controller. - */ -void dma_controller_destroy(struct dma_controller *c) -{ - struct cppi *cppi; - - cppi = container_of(c, struct cppi, controller); - - /* assert: caller stopped the controller first */ - dma_pool_destroy(cppi->pool); - - kfree(cppi); -} - -/* - * Context: controller irqlocked, endpoint selected - */ -static int cppi_channel_abort(struct dma_channel *channel) -{ - struct cppi_channel *cppi_ch; - struct cppi *controller; - void __iomem *mbase; - void __iomem *tibase; - void __iomem *regs; - u32 value; - struct cppi_descriptor *queue; - - cppi_ch = container_of(channel, struct cppi_channel, channel); - - controller = cppi_ch->controller; - - switch (channel->status) { - case MUSB_DMA_STATUS_BUS_ABORT: - case MUSB_DMA_STATUS_CORE_ABORT: - /* from RX or TX fault irq handler */ - case MUSB_DMA_STATUS_BUSY: - /* the hardware needs shutting down */ - regs = cppi_ch->hw_ep->regs; - break; - case MUSB_DMA_STATUS_UNKNOWN: - case MUSB_DMA_STATUS_FREE: - return 0; - default: - return -EINVAL; - } - - if (!cppi_ch->transmit && cppi_ch->head) - cppi_dump_rxq(3, "/abort", cppi_ch); - - mbase = controller->mregs; - tibase = controller->tibase; - - queue = cppi_ch->head; - cppi_ch->head = NULL; - cppi_ch->tail = NULL; - - /* REVISIT should rely on caller having done this, - * and caller should rely on us not changing it. - * peripheral code is safe ... check host too. - */ - musb_ep_select(mbase, cppi_ch->index + 1); - - if (cppi_ch->transmit) { - struct cppi_tx_stateram __iomem *tx_ram; - int enabled; - - /* mask interrupts raised to signal teardown complete. */ - enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG) - & (1 << cppi_ch->index); - if (enabled) - musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, - (1 << cppi_ch->index)); - - /* REVISIT put timeouts on these controller handshakes */ - - cppi_dump_tx(6, cppi_ch, " (teardown)"); - - /* teardown DMA engine then usb core */ - do { - value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG); - } while (!(value & CPPI_TEAR_READY)); - musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index); - - tx_ram = cppi_ch->state_ram; - do { - value = musb_readl(&tx_ram->tx_complete, 0); - } while (0xFFFFFFFC != value); - musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC); - - /* FIXME clean up the transfer state ... here? - * the completion routine should get called with - * an appropriate status code. 
- */ - - value = musb_readw(regs, MUSB_TXCSR); - value &= ~MUSB_TXCSR_DMAENAB; - value |= MUSB_TXCSR_FLUSHFIFO; - musb_writew(regs, MUSB_TXCSR, value); - musb_writew(regs, MUSB_TXCSR, value); - - /* re-enable interrupt */ - if (enabled) - musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, - (1 << cppi_ch->index)); - - /* While we scrub the TX state RAM, ensure that we clean - * up any interrupt that's currently asserted: - * 1. Write to completion Ptr value 0x1(bit 0 set) - * (write back mode) - * 2. Write to completion Ptr value 0x0(bit 0 cleared) - * (compare mode) - * Value written is compared(for bits 31:2) and when - * equal, interrupt is deasserted. - */ - cppi_reset_tx(tx_ram, 1); - musb_writel(&tx_ram->tx_complete, 0, 0); - - cppi_dump_tx(5, cppi_ch, " (done teardown)"); - - /* REVISIT tx side _should_ clean up the same way - * as the RX side ... this does no cleanup at all! - */ - - } else /* RX */ { - u16 csr; - - /* NOTE: docs don't guarantee any of this works ... we - * expect that if the usb core stops telling the cppi core - * to pull more data from it, then it'll be safe to flush - * current RX DMA state iff any pending fifo transfer is done. - */ - - core_rxirq_disable(tibase, cppi_ch->index + 1); - - /* for host, ensure ReqPkt is never set again */ - if (is_host_active(cppi_ch->controller->musb)) { - value = musb_readl(tibase, DAVINCI_AUTOREQ_REG); - value &= ~((0x3) << (cppi_ch->index * 2)); - musb_writel(tibase, DAVINCI_AUTOREQ_REG, value); - } - - csr = musb_readw(regs, MUSB_RXCSR); - - /* for host, clear (just) ReqPkt at end of current packet(s) */ - if (is_host_active(cppi_ch->controller->musb)) { - csr |= MUSB_RXCSR_H_WZC_BITS; - csr &= ~MUSB_RXCSR_H_REQPKT; - } else - csr |= MUSB_RXCSR_P_WZC_BITS; - - /* clear dma enable */ - csr &= ~(MUSB_RXCSR_DMAENAB); - musb_writew(regs, MUSB_RXCSR, csr); - csr = musb_readw(regs, MUSB_RXCSR); - - /* Quiesce: wait for current dma to finish (if not cleanup). - * We can't use bit zero of stateram->rx_sop, since that - * refers to an entire "DMA packet" not just emptying the - * current fifo. Most segments need multiple usb packets. - */ - if (channel->status == MUSB_DMA_STATUS_BUSY) - udelay(50); - - /* scan the current list, reporting any data that was - * transferred and acking any IRQ - */ - cppi_rx_scan(controller, cppi_ch->index); - - /* clobber the existing state once it's idle - * - * NOTE: arguably, we should also wait for all the other - * RX channels to quiesce (how??) and then temporarily - * disable RXCPPI_CTRL_REG ... but it seems that we can - * rely on the controller restarting from state ram, with - * only RXCPPI_BUFCNT state being bogus. BUFCNT will - * correct itself after the next DMA transfer though. - * - * REVISIT does using rndis mode change that? - */ - cppi_reset_rx(cppi_ch->state_ram); - - /* next DMA request _should_ load cppi head ptr */ - - /* ... we don't "free" that list, only mutate it in place. */ - cppi_dump_rx(5, cppi_ch, " (done abort)"); - - /* clean up previously pending bds */ - cppi_bd_free(cppi_ch, cppi_ch->last_processed); - cppi_ch->last_processed = NULL; - - while (queue) { - struct cppi_descriptor *tmp = queue->next; - - cppi_bd_free(cppi_ch, queue); - queue = tmp; - } - } - - channel->status = MUSB_DMA_STATUS_FREE; - cppi_ch->buf_dma = 0; - cppi_ch->offset = 0; - cppi_ch->buf_len = 0; - cppi_ch->maxpacket = 0; - return 0; -} - -/* TBD Queries: - * - * Power Management ... probably turn off cppi during suspend, restart; - * check state ram? Clocking is presumably shared with usb core. 
- */ diff --git a/trunk/drivers/usb/musb/cppi_dma.h b/trunk/drivers/usb/musb/cppi_dma.h deleted file mode 100644 index fc5216b5d2c5..000000000000 --- a/trunk/drivers/usb/musb/cppi_dma.h +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright (C) 2005-2006 by Texas Instruments */ - -#ifndef _CPPI_DMA_H_ -#define _CPPI_DMA_H_ - -#include -#include -#include -#include -#include - -#include "musb_dma.h" -#include "musb_core.h" - - -/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers - * would seem to be shared with the TUSB6020 (over VLYNQ). - */ - -#include "davinci.h" - - -/* CPPI RX/TX state RAM */ - -struct cppi_tx_stateram { - u32 tx_head; /* "DMA packet" head descriptor */ - u32 tx_buf; - u32 tx_current; /* current descriptor */ - u32 tx_buf_current; - u32 tx_info; /* flags, remaining buflen */ - u32 tx_rem_len; - u32 tx_dummy; /* unused */ - u32 tx_complete; -}; - -struct cppi_rx_stateram { - u32 rx_skipbytes; - u32 rx_head; - u32 rx_sop; /* "DMA packet" head descriptor */ - u32 rx_current; /* current descriptor */ - u32 rx_buf_current; - u32 rx_len_len; - u32 rx_cnt_cnt; - u32 rx_complete; -}; - -/* hw_options bits in CPPI buffer descriptors */ -#define CPPI_SOP_SET ((u32)(1 << 31)) -#define CPPI_EOP_SET ((u32)(1 << 30)) -#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */ -#define CPPI_EOQ_MASK ((u32)(1 << 28)) -#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */ -#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */ - -#define CPPI_RECV_PKTLEN_MASK 0xFFFF -#define CPPI_BUFFER_LEN_MASK 0xFFFF - -#define CPPI_TEAR_READY ((u32)(1 << 31)) - -/* CPPI data structure definitions */ - -#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */ - -struct cppi_descriptor { - /* hardware overlay */ - u32 hw_next; /* next buffer descriptor Pointer */ - u32 hw_bufp; /* i/o buffer pointer */ - u32 hw_off_len; /* buffer_offset16, buffer_length16 */ - u32 hw_options; /* flags: SOP, EOP etc*/ - - struct cppi_descriptor *next; - dma_addr_t dma; /* address of this descriptor */ - u32 buflen; /* for RX: original buffer length */ -} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN))); - - -struct cppi; - -/* CPPI Channel Control structure */ -struct cppi_channel { - struct dma_channel channel; - - /* back pointer to the DMA controller structure */ - struct cppi *controller; - - /* which direction of which endpoint? */ - struct musb_hw_ep *hw_ep; - bool transmit; - u8 index; - - /* DMA modes: RNDIS or "transparent" */ - u8 is_rndis; - - /* book keeping for current transfer request */ - dma_addr_t buf_dma; - u32 buf_len; - u32 maxpacket; - u32 offset; /* dma requested */ - - void __iomem *state_ram; /* CPPI state */ - - struct cppi_descriptor *freelist; - - /* BD management fields */ - struct cppi_descriptor *head; - struct cppi_descriptor *tail; - struct cppi_descriptor *last_processed; - - /* use tx_complete in host role to track endpoints waiting for - * FIFONOTEMPTY to clear. 
- */ - struct list_head tx_complete; -}; - -/* CPPI DMA controller object */ -struct cppi { - struct dma_controller controller; - struct musb *musb; - void __iomem *mregs; /* Mentor regs */ - void __iomem *tibase; /* TI/CPPI regs */ - - struct cppi_channel tx[MUSB_C_NUM_EPT - 1]; - struct cppi_channel rx[MUSB_C_NUM_EPR - 1]; - - struct dma_pool *pool; - - struct list_head tx_complete; -}; - -/* irq handling hook */ -extern void cppi_completion(struct musb *, u32 rx, u32 tx); - -#endif /* end of ifndef _CPPI_DMA_H_ */ diff --git a/trunk/drivers/usb/musb/davinci.c b/trunk/drivers/usb/musb/davinci.c deleted file mode 100644 index 75baf181a8cd..000000000000 --- a/trunk/drivers/usb/musb/davinci.c +++ /dev/null @@ -1,462 +0,0 @@ -/* - * Copyright (C) 2005-2006 by Texas Instruments - * - * This file is part of the Inventra Controller Driver for Linux. - * - * The Inventra Controller Driver for Linux is free software; you - * can redistribute it and/or modify it under the terms of the GNU - * General Public License version 2 as published by the Free Software - * Foundation. - * - * The Inventra Controller Driver for Linux is distributed in - * the hope that it will be useful, but WITHOUT ANY WARRANTY; - * without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public - * License for more details. - * - * You should have received a copy of the GNU General Public License - * along with The Inventra Controller Driver for Linux ; if not, - * write to the Free Software Foundation, Inc., 59 Temple Place, - * Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "musb_core.h" - -#ifdef CONFIG_MACH_DAVINCI_EVM -#include -#endif - -#include "davinci.h" -#include "cppi_dma.h" - - -/* REVISIT (PM) we should be able to keep the PHY in low power mode most - * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 - * and, when in host mode, autosuspending idle root ports... PHYPLLON - * (overriding SUSPENDM?) then likely needs to stay off. 
- */ - -static inline void phy_on(void) -{ - /* start the on-chip PHY and its PLL */ - __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON, - (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR)); - while ((__raw_readl((void __force __iomem *) - IO_ADDRESS(USBPHY_CTL_PADDR)) - & USBPHY_PHYCLKGD) == 0) - cpu_relax(); -} - -static inline void phy_off(void) -{ - /* powerdown the on-chip PHY and its oscillator */ - __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *) - IO_ADDRESS(USBPHY_CTL_PADDR)); -} - -static int dma_off = 1; - -void musb_platform_enable(struct musb *musb) -{ - u32 tmp, old, val; - - /* workaround: setup irqs through both register sets */ - tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK) - << DAVINCI_USB_TXINT_SHIFT; - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); - old = tmp; - tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)) - << DAVINCI_USB_RXINT_SHIFT; - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); - tmp |= old; - - val = ~MUSB_INTR_SOF; - tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT); - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); - - if (is_dma_capable() && !dma_off) - printk(KERN_WARNING "%s %s: dma not reactivated\n", - __FILE__, __func__); - else - dma_off = 0; - - /* force a DRVVBUS irq so we can start polling for ID change */ - if (is_otg_enabled(musb)) - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, - DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); -} - -/* - * Disable the HDRC and flush interrupts - */ -void musb_platform_disable(struct musb *musb) -{ - /* because we don't set CTRLR.UINT, "important" to: - * - not read/write INTRUSB/INTRUSBE - * - (except during initial setup, as workaround) - * - use INTSETR/INTCLRR instead - */ - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG, - DAVINCI_USB_USBINT_MASK - | DAVINCI_USB_TXINT_MASK - | DAVINCI_USB_RXINT_MASK); - musb_writeb(musb->mregs, MUSB_DEVCTL, 0); - musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0); - - if (is_dma_capable() && !dma_off) - WARNING("dma still active\n"); -} - - -/* REVISIT it's not clear whether DaVinci can support full OTG. */ - -static int vbus_state = -1; - -#ifdef CONFIG_USB_MUSB_HDRC_HCD -#define portstate(stmt) stmt -#else -#define portstate(stmt) -#endif - - -/* VBUS SWITCHING IS BOARD-SPECIFIC */ - -#ifdef CONFIG_MACH_DAVINCI_EVM -#ifndef CONFIG_MACH_DAVINCI_EVM_OTG - -/* I2C operations are always synchronous, and require a task context. - * With unloaded systems, using the shared workqueue seems to suffice - * to satisfy the 100msec A_WAIT_VRISE timeout... 
- */ -static void evm_deferred_drvvbus(struct work_struct *ignored) -{ - davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state); - vbus_state = !vbus_state; -} -static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); - -#endif /* modified board */ -#endif /* EVM */ - -static void davinci_source_power(struct musb *musb, int is_on, int immediate) -{ - if (is_on) - is_on = 1; - - if (vbus_state == is_on) - return; - vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ - -#ifdef CONFIG_MACH_DAVINCI_EVM - if (machine_is_davinci_evm()) { -#ifdef CONFIG_MACH_DAVINCI_EVM_OTG - /* modified EVM board switching VBUS with GPIO(6) not I2C - * NOTE: PINMUX0.RGB888 (bit23) must be clear - */ - if (is_on) - gpio_set(GPIO(6)); - else - gpio_clear(GPIO(6)); - immediate = 1; -#else - if (immediate) - davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on); - else - schedule_work(&evm_vbus_work); -#endif - } -#endif - if (immediate) - vbus_state = is_on; -} - -static void davinci_set_vbus(struct musb *musb, int is_on) -{ - WARN_ON(is_on && is_peripheral_active(musb)); - davinci_source_power(musb, is_on, 0); -} - - -#define POLL_SECONDS 2 - -static struct timer_list otg_workaround; - -static void otg_timer(unsigned long _musb) -{ - struct musb *musb = (void *)_musb; - void __iomem *mregs = musb->mregs; - u8 devctl; - unsigned long flags; - - /* We poll because DaVinci's won't expose several OTG-critical - * status change events (from the transceiver) otherwise. - */ - devctl = musb_readb(mregs, MUSB_DEVCTL); - DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); - - spin_lock_irqsave(&musb->lock, flags); - switch (musb->xceiv.state) { - case OTG_STATE_A_WAIT_VFALL: - /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL - * seems to mis-handle session "start" otherwise (or in our - * case "recover"), in routine "VBUS was valid by the time - * VBUSERR got reported during enumeration" cases. - */ - if (devctl & MUSB_DEVCTL_VBUS) { - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - break; - } - musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; - musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, - MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); - break; - case OTG_STATE_B_IDLE: - if (!is_peripheral_enabled(musb)) - break; - - /* There's no ID-changed IRQ, so we have no good way to tell - * when to switch to the A-Default state machine (by setting - * the DEVCTL.SESSION flag). - * - * Workaround: whenever we're in B_IDLE, try setting the - * session flag every few seconds. If it works, ID was - * grounded and we're now in the A-Default state machine. - * - * NOTE setting the session flag is _supposed_ to trigger - * SRP, but clearly it doesn't. - */ - musb_writeb(mregs, MUSB_DEVCTL, - devctl | MUSB_DEVCTL_SESSION); - devctl = musb_readb(mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - else - musb->xceiv.state = OTG_STATE_A_IDLE; - break; - default: - break; - } - spin_unlock_irqrestore(&musb->lock, flags); -} - -static irqreturn_t davinci_interrupt(int irq, void *__hci) -{ - unsigned long flags; - irqreturn_t retval = IRQ_NONE; - struct musb *musb = __hci; - void __iomem *tibase = musb->ctrl_base; - u32 tmp; - - spin_lock_irqsave(&musb->lock, flags); - - /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through - * the Mentor registers (except for setup), use the TI ones and EOI. - * - * Docs describe irq "vector" registers asociated with the CPPI and - * USB EOI registers. 
These hold a bitmask corresponding to the - * current IRQ, not an irq handler address. Would using those bits - * resolve some of the races observed in this dispatch code?? - */ - - /* CPPI interrupts share the same IRQ line, but have their own - * mask, state, "vector", and EOI registers. - */ - if (is_cppi_enabled()) { - u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); - u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); - - if (cppi_tx || cppi_rx) { - DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx); - cppi_completion(musb, cppi_rx, cppi_tx); - retval = IRQ_HANDLED; - } - } - - /* ack and handle non-CPPI interrupts */ - tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); - musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); - DBG(4, "IRQ %08x\n", tmp); - - musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) - >> DAVINCI_USB_RXINT_SHIFT; - musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK) - >> DAVINCI_USB_TXINT_SHIFT; - musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK) - >> DAVINCI_USB_USBINT_SHIFT; - - /* DRVVBUS irqs are the only proxy we have (a very poor one!) for - * DaVinci's missing ID change IRQ. We need an ID change IRQ to - * switch appropriately between halves of the OTG state machine. - * Managing DEVCTL.SESSION per Mentor docs requires we know its - * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. - * Also, DRVVBUS pulses for SRP (but not at 5V) ... - */ - if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) { - int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG); - void __iomem *mregs = musb->mregs; - u8 devctl = musb_readb(mregs, MUSB_DEVCTL); - int err = musb->int_usb & MUSB_INTR_VBUSERROR; - - err = is_host_enabled(musb) - && (musb->int_usb & MUSB_INTR_VBUSERROR); - if (err) { - /* The Mentor core doesn't debounce VBUS as needed - * to cope with device connect current spikes. This - * means it's not uncommon for bus-powered devices - * to get VBUS errors during enumeration. - * - * This is a workaround, but newer RTL from Mentor - * seems to allow a better one: "re"starting sessions - * without waiting (on EVM, a **long** time) for VBUS - * to stop registering in devctl. - */ - musb->int_usb &= ~MUSB_INTR_VBUSERROR; - musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - WARNING("VBUS error workaround (delay coming)\n"); - } else if (is_host_enabled(musb) && drvvbus) { - musb->is_active = 1; - MUSB_HST_MODE(musb); - musb->xceiv.default_a = 1; - musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; - portstate(musb->port1_status |= USB_PORT_STAT_POWER); - del_timer(&otg_workaround); - } else { - musb->is_active = 0; - MUSB_DEV_MODE(musb); - musb->xceiv.default_a = 0; - musb->xceiv.state = OTG_STATE_B_IDLE; - portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); - } - - /* NOTE: this must complete poweron within 100 msec */ - davinci_source_power(musb, drvvbus, 0); - DBG(2, "VBUS %s (%s)%s, devctl %02x\n", - drvvbus ? "on" : "off", - otg_state_string(musb), - err ? " ERROR" : "", - devctl); - retval = IRQ_HANDLED; - } - - if (musb->int_tx || musb->int_rx || musb->int_usb) - retval |= musb_interrupt(musb); - - /* irq stays asserted until EOI is written */ - musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); - - /* poll for ID change */ - if (is_otg_enabled(musb) - && musb->xceiv.state == OTG_STATE_B_IDLE) - mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); - - spin_unlock_irqrestore(&musb->lock, flags); - - /* REVISIT we sometimes get unhandled IRQs - * (e.g. ep0). not clear why... 
- */ - if (retval != IRQ_HANDLED) - DBG(5, "unhandled? %08x\n", tmp); - return IRQ_HANDLED; -} - -int __init musb_platform_init(struct musb *musb) -{ - void __iomem *tibase = musb->ctrl_base; - u32 revision; - - musb->mregs += DAVINCI_BASE_OFFSET; -#if 0 - /* REVISIT there's something odd about clocking, this - * didn't appear do the job ... - */ - musb->clock = clk_get(pDevice, "usb"); - if (IS_ERR(musb->clock)) - return PTR_ERR(musb->clock); - - status = clk_enable(musb->clock); - if (status < 0) - return -ENODEV; -#endif - - /* returns zero if e.g. not clocked */ - revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); - if (revision == 0) - return -ENODEV; - - if (is_host_enabled(musb)) - setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); - - musb->board_set_vbus = davinci_set_vbus; - davinci_source_power(musb, 0, 1); - - /* reset the controller */ - musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); - - /* start the on-chip PHY and its PLL */ - phy_on(); - - msleep(5); - - /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ - pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", - revision, __raw_readl((void __force __iomem *) - IO_ADDRESS(USBPHY_CTL_PADDR)), - musb_readb(tibase, DAVINCI_USB_CTRL_REG)); - - musb->isr = davinci_interrupt; - return 0; -} - -int musb_platform_exit(struct musb *musb) -{ - if (is_host_enabled(musb)) - del_timer_sync(&otg_workaround); - - davinci_source_power(musb, 0 /*off*/, 1); - - /* delay, to avoid problems with module reload */ - if (is_host_enabled(musb) && musb->xceiv.default_a) { - int maxdelay = 30; - u8 devctl, warn = 0; - - /* if there's no peripheral connected, this can take a - * long time to fall, especially on EVM with huge C133. - */ - do { - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (!(devctl & MUSB_DEVCTL_VBUS)) - break; - if ((devctl & MUSB_DEVCTL_VBUS) != warn) { - warn = devctl & MUSB_DEVCTL_VBUS; - DBG(1, "VBUS %d\n", - warn >> MUSB_DEVCTL_VBUS_SHIFT); - } - msleep(1000); - maxdelay--; - } while (maxdelay > 0); - - /* in OTG mode, another host might be connected */ - if (devctl & MUSB_DEVCTL_VBUS) - DBG(1, "VBUS off timeout (devctl %02x)\n", devctl); - } - - phy_off(); - return 0; -} diff --git a/trunk/drivers/usb/musb/davinci.h b/trunk/drivers/usb/musb/davinci.h deleted file mode 100644 index 7fb6238e270f..000000000000 --- a/trunk/drivers/usb/musb/davinci.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (C) 2005-2006 by Texas Instruments - * - * The Inventra Controller Driver for Linux is free software; you - * can redistribute it and/or modify it under the terms of the GNU - * General Public License version 2 as published by the Free Software - * Foundation. 
- */ - -#ifndef __MUSB_HDRDF_H__ -#define __MUSB_HDRDF_H__ - -/* - * DaVinci-specific definitions - */ - -/* Integrated highspeed/otg PHY */ -#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) -#define USBPHY_PHYCLKGD (1 << 8) -#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */ -#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */ -#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */ -#define USBPHY_CLKO1SEL (1 << 3) -#define USBPHY_OSCPDWN (1 << 2) -#define USBPHY_PHYPDWN (1 << 0) - -/* For now include usb OTG module registers here */ -#define DAVINCI_USB_VERSION_REG 0x00 -#define DAVINCI_USB_CTRL_REG 0x04 -#define DAVINCI_USB_STAT_REG 0x08 -#define DAVINCI_RNDIS_REG 0x10 -#define DAVINCI_AUTOREQ_REG 0x14 -#define DAVINCI_USB_INT_SOURCE_REG 0x20 -#define DAVINCI_USB_INT_SET_REG 0x24 -#define DAVINCI_USB_INT_SRC_CLR_REG 0x28 -#define DAVINCI_USB_INT_MASK_REG 0x2c -#define DAVINCI_USB_INT_MASK_SET_REG 0x30 -#define DAVINCI_USB_INT_MASK_CLR_REG 0x34 -#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38 -#define DAVINCI_USB_EOI_REG 0x3c -#define DAVINCI_USB_EOI_INTVEC 0x40 - -/* BEGIN CPPI-generic (?) */ - -/* CPPI related registers */ -#define DAVINCI_TXCPPI_CTRL_REG 0x80 -#define DAVINCI_TXCPPI_TEAR_REG 0x84 -#define DAVINCI_CPPI_EOI_REG 0x88 -#define DAVINCI_CPPI_INTVEC_REG 0x8c -#define DAVINCI_TXCPPI_MASKED_REG 0x90 -#define DAVINCI_TXCPPI_RAW_REG 0x94 -#define DAVINCI_TXCPPI_INTENAB_REG 0x98 -#define DAVINCI_TXCPPI_INTCLR_REG 0x9c - -#define DAVINCI_RXCPPI_CTRL_REG 0xC0 -#define DAVINCI_RXCPPI_MASKED_REG 0xD0 -#define DAVINCI_RXCPPI_RAW_REG 0xD4 -#define DAVINCI_RXCPPI_INTENAB_REG 0xD8 -#define DAVINCI_RXCPPI_INTCLR_REG 0xDC - -#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0 -#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4 -#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8 -#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC - -/* CPPI state RAM entries */ -#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100 - -#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \ - (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40)) -#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \ - (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40)) - -/* CPPI masks */ -#define DAVINCI_DMA_CTRL_ENABLE 1 -#define DAVINCI_DMA_CTRL_DISABLE 0 - -#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF -#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF - -/* END CPPI-generic (?) */ - -#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */ -#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */ - -#define DAVINCI_USB_USBINT_SHIFT 16 -#define DAVINCI_USB_TXINT_SHIFT 0 -#define DAVINCI_USB_RXINT_SHIFT 8 - -#define DAVINCI_INTR_DRVVBUS 0x0100 - -#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */ -#define DAVINCI_USB_TXINT_MASK \ - (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT) -#define DAVINCI_USB_RXINT_MASK \ - (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT) - -#define DAVINCI_BASE_OFFSET 0x400 - -#endif /* __MUSB_HDRDF_H__ */ diff --git a/trunk/drivers/usb/musb/musb_core.c b/trunk/drivers/usb/musb/musb_core.c deleted file mode 100644 index d68ec6daf335..000000000000 --- a/trunk/drivers/usb/musb/musb_core.c +++ /dev/null @@ -1,2261 +0,0 @@ -/* - * MUSB OTG driver core code - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* - * Inventra (Multipoint) Dual-Role Controller Driver for Linux. - * - * This consists of a Host Controller Driver (HCD) and a peripheral - * controller driver implementing the "Gadget" API; OTG support is - * in the works. These are normal Linux-USB controller drivers which - * use IRQs and have no dedicated thread. - * - * This version of the driver has only been used with products from - * Texas Instruments. Those products integrate the Inventra logic - * with other DMA, IRQ, and bus modules, as well as other logic that - * needs to be reflected in this driver. - * - * - * NOTE: the original Mentor code here was pretty much a collection - * of mechanisms that don't seem to have been fully integrated/working - * for *any* Linux kernel version. This version aims at Linux 2.6.now, - * Key open issues include: - * - * - Lack of host-side transaction scheduling, for all transfer types. - * The hardware doesn't do it; instead, software must. - * - * This is not an issue for OTG devices that don't support external - * hubs, but for more "normal" USB hosts it's a user issue that the - * "multipoint" support doesn't scale in the expected ways. That - * includes DaVinci EVM in a common non-OTG mode. - * - * * Control and bulk use dedicated endpoints, and there's as - * yet no mechanism to either (a) reclaim the hardware when - * peripherals are NAKing, which gets complicated with bulk - * endpoints, or (b) use more than a single bulk endpoint in - * each direction. - * - * RESULT: one device may be perceived as blocking another one. - * - * * Interrupt and isochronous will dynamically allocate endpoint - * hardware, but (a) there's no record keeping for bandwidth; - * (b) in the common case that few endpoints are available, there - * is no mechanism to reuse endpoints to talk to multiple devices. - * - * RESULT: At one extreme, bandwidth can be overcommitted in - * some hardware configurations, no faults will be reported. - * At the other extreme, the bandwidth capabilities which do - * exist tend to be severely undercommitted. You can't yet hook - * up both a keyboard and a mouse to an external USB hub. 
- */ - -/* - * This gets many kinds of configuration information: - * - Kconfig for everything user-configurable - * - for SOC or family details - * - platform_device for addressing, irq, and platform_data - * - platform_data is mostly for board-specific informarion - * - * Most of the conditional compilation will (someday) vanish. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_ARM -#include -#include -#include -#endif - -#include "musb_core.h" - - -#ifdef CONFIG_ARCH_DAVINCI -#include "davinci.h" -#endif - - - -#if MUSB_DEBUG > 0 -unsigned debug = MUSB_DEBUG; -module_param(debug, uint, 0); -MODULE_PARM_DESC(debug, "initial debug message level"); - -#define MUSB_VERSION_SUFFIX "/dbg" -#endif - -#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" -#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" - -#define MUSB_VERSION_BASE "6.0" - -#ifndef MUSB_VERSION_SUFFIX -#define MUSB_VERSION_SUFFIX "" -#endif -#define MUSB_VERSION MUSB_VERSION_BASE MUSB_VERSION_SUFFIX - -#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION - -#define MUSB_DRIVER_NAME "musb_hdrc" -const char musb_driver_name[] = MUSB_DRIVER_NAME; - -MODULE_DESCRIPTION(DRIVER_INFO); -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); - - -/*-------------------------------------------------------------------------*/ - -static inline struct musb *dev_to_musb(struct device *dev) -{ -#ifdef CONFIG_USB_MUSB_HDRC_HCD - /* usbcore insists dev->driver_data is a "struct hcd *" */ - return hcd_to_musb(dev_get_drvdata(dev)); -#else - return dev_get_drvdata(dev); -#endif -} - -/*-------------------------------------------------------------------------*/ - -#ifndef CONFIG_USB_TUSB6010 -/* - * Load an endpoint's FIFO - */ -void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) -{ - void __iomem *fifo = hw_ep->fifo; - - prefetch((u8 *)src); - - DBG(4, "%cX ep%d fifo %p count %d buf %p\n", - 'T', hw_ep->epnum, fifo, len, src); - - /* we can't assume unaligned reads work */ - if (likely((0x01 & (unsigned long) src) == 0)) { - u16 index = 0; - - /* best case is 32bit-aligned source address */ - if ((0x02 & (unsigned long) src) == 0) { - if (len >= 4) { - writesl(fifo, src + index, len >> 2); - index += len & ~0x03; - } - if (len & 0x02) { - musb_writew(fifo, 0, *(u16 *)&src[index]); - index += 2; - } - } else { - if (len >= 2) { - writesw(fifo, src + index, len >> 1); - index += len & ~0x01; - } - } - if (len & 0x01) - musb_writeb(fifo, 0, src[index]); - } else { - /* byte aligned */ - writesb(fifo, src, len); - } -} - -/* - * Unload an endpoint's FIFO - */ -void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) -{ - void __iomem *fifo = hw_ep->fifo; - - DBG(4, "%cX ep%d fifo %p count %d buf %p\n", - 'R', hw_ep->epnum, fifo, len, dst); - - /* we can't assume unaligned writes work */ - if (likely((0x01 & (unsigned long) dst) == 0)) { - u16 index = 0; - - /* best case is 32bit-aligned destination address */ - if ((0x02 & (unsigned long) dst) == 0) { - if (len >= 4) { - readsl(fifo, dst, len >> 2); - index = len & ~0x03; - } - if (len & 0x02) { - *(u16 *)&dst[index] = musb_readw(fifo, 0); - index += 2; - } - } else { - if (len >= 2) { - readsw(fifo, dst, len >> 1); - index = len & ~0x01; - } - } - if (len & 0x01) - dst[index] = musb_readb(fifo, 0); - } else { - /* byte aligned */ - readsb(fifo, dst, len); - } -} - -#endif /* normal PIO */ - - 
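[Editorial sketch] The two PIO helpers above (musb_write_fifo/musb_read_fifo) pick the widest register access the buffer alignment allows: 32-bit bursts for word-aligned buffers, 16-bit accesses for halfword-aligned ones, and plain byte accesses otherwise, with any 1-3 byte tail mopped up separately. The following is a minimal stand-alone sketch of that dispatch; a printed log takes the place of the single FIFO register, and all names are illustrative rather than taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the access-width dispatch used by the FIFO PIO helpers above.
 * The real FIFO is one register written repeatedly; here each access is
 * just logged so the 32/16/8-bit choice is visible.
 */
static void fifo_push(int width, uint32_t val)
{
    printf("  %2d-bit write: 0x%0*x\n", width, width / 4, val);
}

static void fifo_write_model(const uint8_t *src, uint16_t len)
{
    uint16_t index = 0;

    if (((uintptr_t)src & 0x01) == 0) {
        if (((uintptr_t)src & 0x02) == 0) {
            /* word-aligned source: 32-bit bursts first */
            for (; index + 4 <= len; index += 4) {
                uint32_t w;
                memcpy(&w, src + index, 4);
                fifo_push(32, w);
            }
            if (len & 0x02) {
                uint16_t h;
                memcpy(&h, src + index, 2);
                fifo_push(16, h);
                index += 2;
            }
        } else {
            /* halfword-aligned source: 16-bit accesses */
            for (; index + 2 <= len; index += 2) {
                uint16_t h;
                memcpy(&h, src + index, 2);
                fifo_push(16, h);
            }
        }
        if (len & 0x01)
            fifo_push(8, src[index]);
    } else {
        /* odd source address: fall back to byte accesses */
        for (; index < len; index++)
            fifo_push(8, src[index]);
    }
}

int main(void)
{
    /* a uint32_t array guarantees word alignment for the demo */
    static const uint32_t buf[3] = { 0x03020100, 0x07060504, 0x0b0a0908 };

    puts("word-aligned buffer, 7 bytes:");
    fifo_write_model((const uint8_t *)buf, 7);
    puts("odd start address, 5 bytes:");
    fifo_write_model((const uint8_t *)buf + 1, 5);
    return 0;
}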
-/*-------------------------------------------------------------------------*/ - -/* for high speed test mode; see USB 2.0 spec 7.1.20 */ -static const u8 musb_test_packet[53] = { - /* implicit SYNC then DATA0 to start */ - - /* JKJKJKJK x9 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /* JJKKJJKK x8 */ - 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, - /* JJJJKKKK x8 */ - 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, - /* JJJJJJJKKKKKKK x8 */ - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - /* JJJJJJJK x8 */ - 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, - /* JKKKKKKK x10, JK */ - 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e - - /* implicit CRC16 then EOP to end */ -}; - -void musb_load_testpacket(struct musb *musb) -{ - void __iomem *regs = musb->endpoints[0].regs; - - musb_ep_select(musb->mregs, 0); - musb_write_fifo(musb->control_ep, - sizeof(musb_test_packet), musb_test_packet); - musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY); -} - -/*-------------------------------------------------------------------------*/ - -const char *otg_state_string(struct musb *musb) -{ - switch (musb->xceiv.state) { - case OTG_STATE_A_IDLE: return "a_idle"; - case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; - case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; - case OTG_STATE_A_HOST: return "a_host"; - case OTG_STATE_A_SUSPEND: return "a_suspend"; - case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; - case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; - case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; - case OTG_STATE_B_IDLE: return "b_idle"; - case OTG_STATE_B_SRP_INIT: return "b_srp_init"; - case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; - case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; - case OTG_STATE_B_HOST: return "b_host"; - default: return "UNDEFINED"; - } -} - -#ifdef CONFIG_USB_MUSB_OTG - -/* - * See also USB_OTG_1-3.pdf 6.6.5 Timers - * REVISIT: Are the other timers done in the hardware? - */ -#define TB_ASE0_BRST 100 /* Min 3.125 ms */ - -/* - * Handles OTG hnp timeouts, such as b_ase0_brst - */ -void musb_otg_timer_func(unsigned long data) -{ - struct musb *musb = (struct musb *)data; - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); - switch (musb->xceiv.state) { - case OTG_STATE_B_WAIT_ACON: - DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n"); - musb_g_disconnect(musb); - musb->xceiv.state = OTG_STATE_B_PERIPHERAL; - musb->is_active = 0; - break; - case OTG_STATE_A_WAIT_BCON: - DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n"); - musb_hnp_stop(musb); - break; - default: - DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb)); - } - musb->ignore_disconnect = 0; - spin_unlock_irqrestore(&musb->lock, flags); -} - -static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0); - -/* - * Stops the B-device HNP state. Caller must take care of locking. 
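[Editorial sketch] musb_otg_timer_func() above is the HNP watchdog: if the gadget handed the bus over (b_wait_acon) but the A-device never connected within TB_ASE0_BRST, it drops back to b_peripheral; an A-device stuck in a_wait_bcon is pushed back to the start of the A-side state machine via musb_hnp_stop(). A toy state-transition model of that timeout follows (stand-alone C with abbreviated state names, not driver code).

#include <stdio.h>

enum otg_state { B_PERIPHERAL, B_WAIT_ACON, A_IDLE, A_WAIT_BCON };

static const char *state_name[] = {
    "b_peripheral", "b_wait_acon", "a_idle", "a_wait_bcon"
};

/* What the HNP timeout does when it fires before the expected connect:
 * the B-device resumes its gadget role, the A-device falls back to the
 * start of the A-side state machine.
 */
static enum otg_state hnp_timeout(enum otg_state s)
{
    switch (s) {
    case B_WAIT_ACON:
        return B_PERIPHERAL;
    case A_WAIT_BCON:
        return A_IDLE;
    default:
        return s;    /* the timer is not armed in other states */
    }
}

int main(void)
{
    printf("b_wait_acon timeout -> %s\n", state_name[hnp_timeout(B_WAIT_ACON)]);
    printf("a_wait_bcon timeout -> %s\n", state_name[hnp_timeout(A_WAIT_BCON)]);
    return 0;
}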
- */ -void musb_hnp_stop(struct musb *musb) -{ - struct usb_hcd *hcd = musb_to_hcd(musb); - void __iomem *mbase = musb->mregs; - u8 reg; - - switch (musb->xceiv.state) { - case OTG_STATE_A_PERIPHERAL: - case OTG_STATE_A_WAIT_VFALL: - case OTG_STATE_A_WAIT_BCON: - DBG(1, "HNP: Switching back to A-host\n"); - musb_g_disconnect(musb); - musb->xceiv.state = OTG_STATE_A_IDLE; - MUSB_HST_MODE(musb); - musb->is_active = 0; - break; - case OTG_STATE_B_HOST: - DBG(1, "HNP: Disabling HR\n"); - hcd->self.is_b_host = 0; - musb->xceiv.state = OTG_STATE_B_PERIPHERAL; - MUSB_DEV_MODE(musb); - reg = musb_readb(mbase, MUSB_POWER); - reg |= MUSB_POWER_SUSPENDM; - musb_writeb(mbase, MUSB_POWER, reg); - /* REVISIT: Start SESSION_REQUEST here? */ - break; - default: - DBG(1, "HNP: Stopping in unknown state %s\n", - otg_state_string(musb)); - } - - /* - * When returning to A state after HNP, avoid hub_port_rebounce(), - * which cause occasional OPT A "Did not receive reset after connect" - * errors. - */ - musb->port1_status &= - ~(1 << USB_PORT_FEAT_C_CONNECTION); -} - -#endif - -/* - * Interrupt Service Routine to record USB "global" interrupts. - * Since these do not happen often and signify things of - * paramount importance, it seems OK to check them individually; - * the order of the tests is specified in the manual - * - * @param musb instance pointer - * @param int_usb register contents - * @param devctl - * @param power - */ - -#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \ - | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \ - | MUSB_INTR_RESET) - -static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, - u8 devctl, u8 power) -{ - irqreturn_t handled = IRQ_NONE; - void __iomem *mbase = musb->mregs; - - DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, - int_usb); - - /* in host mode, the peripheral may issue remote wakeup. - * in peripheral mode, the host may resume the link. - * spurious RESUME irqs happen too, paired with SUSPEND. - */ - if (int_usb & MUSB_INTR_RESUME) { - handled = IRQ_HANDLED; - DBG(3, "RESUME (%s)\n", otg_state_string(musb)); - - if (devctl & MUSB_DEVCTL_HM) { -#ifdef CONFIG_USB_MUSB_HDRC_HCD - switch (musb->xceiv.state) { - case OTG_STATE_A_SUSPEND: - /* remote wakeup? later, GetPortStatus - * will stop RESUME signaling - */ - - if (power & MUSB_POWER_SUSPENDM) { - /* spurious */ - musb->int_usb &= ~MUSB_INTR_SUSPEND; - DBG(2, "Spurious SUSPENDM\n"); - break; - } - - power &= ~MUSB_POWER_SUSPENDM; - musb_writeb(mbase, MUSB_POWER, - power | MUSB_POWER_RESUME); - - musb->port1_status |= - (USB_PORT_STAT_C_SUSPEND << 16) - | MUSB_PORT_STAT_RESUME; - musb->rh_timer = jiffies - + msecs_to_jiffies(20); - - musb->xceiv.state = OTG_STATE_A_HOST; - musb->is_active = 1; - usb_hcd_resume_root_hub(musb_to_hcd(musb)); - break; - case OTG_STATE_B_WAIT_ACON: - musb->xceiv.state = OTG_STATE_B_PERIPHERAL; - musb->is_active = 1; - MUSB_DEV_MODE(musb); - break; - default: - WARNING("bogus %s RESUME (%s)\n", - "host", - otg_state_string(musb)); - } -#endif - } else { - switch (musb->xceiv.state) { -#ifdef CONFIG_USB_MUSB_HDRC_HCD - case OTG_STATE_A_SUSPEND: - /* possibly DISCONNECT is upcoming */ - musb->xceiv.state = OTG_STATE_A_HOST; - usb_hcd_resume_root_hub(musb_to_hcd(musb)); - break; -#endif -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - case OTG_STATE_B_WAIT_ACON: - case OTG_STATE_B_PERIPHERAL: - /* disconnect while suspended? we may - * not get a disconnect irq... 
- */ - if ((devctl & MUSB_DEVCTL_VBUS) - != (3 << MUSB_DEVCTL_VBUS_SHIFT) - ) { - musb->int_usb |= MUSB_INTR_DISCONNECT; - musb->int_usb &= ~MUSB_INTR_SUSPEND; - break; - } - musb_g_resume(musb); - break; - case OTG_STATE_B_IDLE: - musb->int_usb &= ~MUSB_INTR_SUSPEND; - break; -#endif - default: - WARNING("bogus %s RESUME (%s)\n", - "peripheral", - otg_state_string(musb)); - } - } - } - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - /* see manual for the order of the tests */ - if (int_usb & MUSB_INTR_SESSREQ) { - DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); - - /* IRQ arrives from ID pin sense or (later, if VBUS power - * is removed) SRP. responses are time critical: - * - turn on VBUS (with silicon-specific mechanism) - * - go through A_WAIT_VRISE - * - ... to A_WAIT_BCON. - * a_wait_vrise_tmout triggers VBUS_ERROR transitions - */ - musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); - musb->ep0_stage = MUSB_EP0_START; - musb->xceiv.state = OTG_STATE_A_IDLE; - MUSB_HST_MODE(musb); - musb_set_vbus(musb, 1); - - handled = IRQ_HANDLED; - } - - if (int_usb & MUSB_INTR_VBUSERROR) { - int ignore = 0; - - /* During connection as an A-Device, we may see a short - * current spikes causing voltage drop, because of cable - * and peripheral capacitance combined with vbus draw. - * (So: less common with truly self-powered devices, where - * vbus doesn't act like a power supply.) - * - * Such spikes are short; usually less than ~500 usec, max - * of ~2 msec. That is, they're not sustained overcurrent - * errors, though they're reported using VBUSERROR irqs. - * - * Workarounds: (a) hardware: use self powered devices. - * (b) software: ignore non-repeated VBUS errors. - * - * REVISIT: do delays from lots of DEBUG_KERNEL checks - * make trouble here, keeping VBUS < 4.4V ? - */ - switch (musb->xceiv.state) { - case OTG_STATE_A_HOST: - /* recovery is dicey once we've gotten past the - * initial stages of enumeration, but if VBUS - * stayed ok at the other end of the link, and - * another reset is due (at least for high speed, - * to redo the chirp etc), it might work OK... 
- */ - case OTG_STATE_A_WAIT_BCON: - case OTG_STATE_A_WAIT_VRISE: - if (musb->vbuserr_retry) { - musb->vbuserr_retry--; - ignore = 1; - devctl |= MUSB_DEVCTL_SESSION; - musb_writeb(mbase, MUSB_DEVCTL, devctl); - } else { - musb->port1_status |= - (1 << USB_PORT_FEAT_OVER_CURRENT) - | (1 << USB_PORT_FEAT_C_OVER_CURRENT); - } - break; - default: - break; - } - - DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", - otg_state_string(musb), - devctl, - ({ char *s; - switch (devctl & MUSB_DEVCTL_VBUS) { - case 0 << MUSB_DEVCTL_VBUS_SHIFT: - s = "vbuserr_retry, - musb->port1_status); - - /* go through A_WAIT_VFALL then start a new session */ - if (!ignore) - musb_set_vbus(musb, 0); - handled = IRQ_HANDLED; - } - - if (int_usb & MUSB_INTR_CONNECT) { - struct usb_hcd *hcd = musb_to_hcd(musb); - - handled = IRQ_HANDLED; - musb->is_active = 1; - set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); - - musb->ep0_stage = MUSB_EP0_START; - -#ifdef CONFIG_USB_MUSB_OTG - /* flush endpoints when transitioning from Device Mode */ - if (is_peripheral_active(musb)) { - /* REVISIT HNP; just force disconnect */ - } - musb_writew(mbase, MUSB_INTRTXE, musb->epmask); - musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe); - musb_writeb(mbase, MUSB_INTRUSBE, 0xf7); -#endif - musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED - |USB_PORT_STAT_HIGH_SPEED - |USB_PORT_STAT_ENABLE - ); - musb->port1_status |= USB_PORT_STAT_CONNECTION - |(USB_PORT_STAT_C_CONNECTION << 16); - - /* high vs full speed is just a guess until after reset */ - if (devctl & MUSB_DEVCTL_LSDEV) - musb->port1_status |= USB_PORT_STAT_LOW_SPEED; - - if (hcd->status_urb) - usb_hcd_poll_rh_status(hcd); - else - usb_hcd_resume_root_hub(hcd); - - MUSB_HST_MODE(musb); - - /* indicate new connection to OTG machine */ - switch (musb->xceiv.state) { - case OTG_STATE_B_PERIPHERAL: - if (int_usb & MUSB_INTR_SUSPEND) { - DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n"); - musb->xceiv.state = OTG_STATE_B_HOST; - hcd->self.is_b_host = 1; - int_usb &= ~MUSB_INTR_SUSPEND; - } else - DBG(1, "CONNECT as b_peripheral???\n"); - break; - case OTG_STATE_B_WAIT_ACON: - DBG(1, "HNP: Waiting to switch to b_host state\n"); - musb->xceiv.state = OTG_STATE_B_HOST; - hcd->self.is_b_host = 1; - break; - default: - if ((devctl & MUSB_DEVCTL_VBUS) - == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { - musb->xceiv.state = OTG_STATE_A_HOST; - hcd->self.is_b_host = 0; - } - break; - } - DBG(1, "CONNECT (%s) devctl %02x\n", - otg_state_string(musb), devctl); - } -#endif /* CONFIG_USB_MUSB_HDRC_HCD */ - - /* mentor saves a bit: bus reset and babble share the same irq. - * only host sees babble; only peripheral sees bus reset. - */ - if (int_usb & MUSB_INTR_RESET) { - if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { - /* - * Looks like non-HS BABBLE can be ignored, but - * HS BABBLE is an error condition. For HS the solution - * is to avoid babble in the first place and fix what - * caused BABBLE. When HS BABBLE happens we can only - * stop the session. - */ - if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV)) - DBG(1, "BABBLE devctl: %02x\n", devctl); - else { - ERR("Stopping host session -- babble\n"); - musb_writeb(mbase, MUSB_DEVCTL, 0); - } - } else if (is_peripheral_capable()) { - DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); - switch (musb->xceiv.state) { -#ifdef CONFIG_USB_OTG - case OTG_STATE_A_SUSPEND: - /* We need to ignore disconnect on suspend - * otherwise tusb 2.0 won't reconnect after a - * power cycle, which breaks otg compliance. 
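[Editorial sketch] The VBUSERROR branch above absorbs the short VBUS droops caused by connect inrush current: while a small retry budget lasts it simply re-asserts DEVCTL.SESSION and keeps going, and only when the budget is exhausted does it flag the port over-current and drop VBUS. A toy model of that policy follows (stand-alone C; the budget of 3 is an assumed value standing in for the driver's VBUSERR_RETRY_COUNT).

#include <stdbool.h>
#include <stdio.h>

#define VBUSERR_RETRY_COUNT 3    /* assumed; the driver takes this from its header */

struct vbus_model {
    int retries_left;
    bool session_on;
};

/* Returns true if the error was absorbed (session re-asserted, keep trying),
 * false once the budget is gone and the port is reported over-current.
 */
static bool vbus_error_event(struct vbus_model *m)
{
    if (m->retries_left) {
        m->retries_left--;
        m->session_on = true;    /* re-assert DEVCTL.SESSION */
        return true;
    }
    m->session_on = false;       /* give up: drop VBUS, flag over-current */
    return false;
}

int main(void)
{
    struct vbus_model m = { VBUSERR_RETRY_COUNT, true };

    for (int i = 1; i <= 5; i++)
        printf("VBUS error %d: %s\n", i,
               vbus_error_event(&m) ? "retry" : "over-current");
    return 0;
}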
- */ - musb->ignore_disconnect = 1; - musb_g_reset(musb); - /* FALLTHROUGH */ - case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ - DBG(1, "HNP: Setting timer as %s\n", - otg_state_string(musb)); - musb_otg_timer.data = (unsigned long)musb; - mod_timer(&musb_otg_timer, jiffies - + msecs_to_jiffies(100)); - break; - case OTG_STATE_A_PERIPHERAL: - musb_hnp_stop(musb); - break; - case OTG_STATE_B_WAIT_ACON: - DBG(1, "HNP: RESET (%s), to b_peripheral\n", - otg_state_string(musb)); - musb->xceiv.state = OTG_STATE_B_PERIPHERAL; - musb_g_reset(musb); - break; -#endif - case OTG_STATE_B_IDLE: - musb->xceiv.state = OTG_STATE_B_PERIPHERAL; - /* FALLTHROUGH */ - case OTG_STATE_B_PERIPHERAL: - musb_g_reset(musb); - break; - default: - DBG(1, "Unhandled BUS RESET as %s\n", - otg_state_string(musb)); - } - } - - handled = IRQ_HANDLED; - } - schedule_work(&musb->irq_work); - - return handled; -} - -/* - * Interrupt Service Routine to record USB "global" interrupts. - * Since these do not happen often and signify things of - * paramount importance, it seems OK to check them individually; - * the order of the tests is specified in the manual - * - * @param musb instance pointer - * @param int_usb register contents - * @param devctl - * @param power - */ -static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, - u8 devctl, u8 power) -{ - irqreturn_t handled = IRQ_NONE; - -#if 0 -/* REVISIT ... this would be for multiplexing periodic endpoints, or - * supporting transfer phasing to prevent exceeding ISO bandwidth - * limits of a given frame or microframe. - * - * It's not needed for peripheral side, which dedicates endpoints; - * though it _might_ use SOF irqs for other purposes. - * - * And it's not currently needed for host side, which also dedicates - * endpoints, relies on TX/RX interval registers, and isn't claimed - * to support ISO transfers yet. - */ - if (int_usb & MUSB_INTR_SOF) { - void __iomem *mbase = musb->mregs; - struct musb_hw_ep *ep; - u8 epnum; - u16 frame; - - DBG(6, "START_OF_FRAME\n"); - handled = IRQ_HANDLED; - - /* start any periodic Tx transfers waiting for current frame */ - frame = musb_readw(mbase, MUSB_FRAME); - ep = musb->endpoints; - for (epnum = 1; (epnum < musb->nr_endpoints) - && (musb->epmask >= (1 << epnum)); - epnum++, ep++) { - /* - * FIXME handle framecounter wraps (12 bits) - * eliminate duplicated StartUrb logic - */ - if (ep->dwWaitFrame >= frame) { - ep->dwWaitFrame = 0; - pr_debug("SOF --> periodic TX%s on %d\n", - ep->tx_channel ? 
" DMA" : "", - epnum); - if (!ep->tx_channel) - musb_h_tx_start(musb, epnum); - else - cppi_hostdma_start(musb, epnum); - } - } /* end of for loop */ - } -#endif - - if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { - DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", - otg_state_string(musb), - MUSB_MODE(musb), devctl); - handled = IRQ_HANDLED; - - switch (musb->xceiv.state) { -#ifdef CONFIG_USB_MUSB_HDRC_HCD - case OTG_STATE_A_HOST: - case OTG_STATE_A_SUSPEND: - musb_root_disconnect(musb); - if (musb->a_wait_bcon != 0) - musb_platform_try_idle(musb, jiffies - + msecs_to_jiffies(musb->a_wait_bcon)); - break; -#endif /* HOST */ -#ifdef CONFIG_USB_MUSB_OTG - case OTG_STATE_B_HOST: - musb_hnp_stop(musb); - break; - case OTG_STATE_A_PERIPHERAL: - musb_hnp_stop(musb); - musb_root_disconnect(musb); - /* FALLTHROUGH */ - case OTG_STATE_B_WAIT_ACON: - /* FALLTHROUGH */ -#endif /* OTG */ -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - case OTG_STATE_B_PERIPHERAL: - case OTG_STATE_B_IDLE: - musb_g_disconnect(musb); - break; -#endif /* GADGET */ - default: - WARNING("unhandled DISCONNECT transition (%s)\n", - otg_state_string(musb)); - break; - } - - schedule_work(&musb->irq_work); - } - - if (int_usb & MUSB_INTR_SUSPEND) { - DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", - otg_state_string(musb), devctl, power); - handled = IRQ_HANDLED; - - switch (musb->xceiv.state) { -#ifdef CONFIG_USB_MUSB_OTG - case OTG_STATE_A_PERIPHERAL: - /* - * We cannot stop HNP here, devctl BDEVICE might be - * still set. - */ - break; -#endif - case OTG_STATE_B_PERIPHERAL: - musb_g_suspend(musb); - musb->is_active = is_otg_enabled(musb) - && musb->xceiv.gadget->b_hnp_enable; - if (musb->is_active) { -#ifdef CONFIG_USB_MUSB_OTG - musb->xceiv.state = OTG_STATE_B_WAIT_ACON; - DBG(1, "HNP: Setting timer for b_ase0_brst\n"); - musb_otg_timer.data = (unsigned long)musb; - mod_timer(&musb_otg_timer, jiffies - + msecs_to_jiffies(TB_ASE0_BRST)); -#endif - } - break; - case OTG_STATE_A_WAIT_BCON: - if (musb->a_wait_bcon != 0) - musb_platform_try_idle(musb, jiffies - + msecs_to_jiffies(musb->a_wait_bcon)); - break; - case OTG_STATE_A_HOST: - musb->xceiv.state = OTG_STATE_A_SUSPEND; - musb->is_active = is_otg_enabled(musb) - && musb->xceiv.host->b_hnp_enable; - break; - case OTG_STATE_B_HOST: - /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ - DBG(1, "REVISIT: SUSPEND as B_HOST\n"); - break; - default: - /* "should not happen" */ - musb->is_active = 0; - break; - } - schedule_work(&musb->irq_work); - } - - - return handled; -} - -/*-------------------------------------------------------------------------*/ - -/* -* Program the HDRC to start (enable interrupts, dma, etc.). 
-*/ -void musb_start(struct musb *musb) -{ - void __iomem *regs = musb->mregs; - u8 devctl = musb_readb(regs, MUSB_DEVCTL); - - DBG(2, "<== devctl %02x\n", devctl); - - /* Set INT enable registers, enable interrupts */ - musb_writew(regs, MUSB_INTRTXE, musb->epmask); - musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe); - musb_writeb(regs, MUSB_INTRUSBE, 0xf7); - - musb_writeb(regs, MUSB_TESTMODE, 0); - - /* put into basic highspeed mode and start session */ - musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE - | MUSB_POWER_SOFTCONN - | MUSB_POWER_HSENAB - /* ENSUSPEND wedges tusb */ - /* | MUSB_POWER_ENSUSPEND */ - ); - - musb->is_active = 0; - devctl = musb_readb(regs, MUSB_DEVCTL); - devctl &= ~MUSB_DEVCTL_SESSION; - - if (is_otg_enabled(musb)) { - /* session started after: - * (a) ID-grounded irq, host mode; - * (b) vbus present/connect IRQ, peripheral mode; - * (c) peripheral initiates, using SRP - */ - if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) - musb->is_active = 1; - else - devctl |= MUSB_DEVCTL_SESSION; - - } else if (is_host_enabled(musb)) { - /* assume ID pin is hard-wired to ground */ - devctl |= MUSB_DEVCTL_SESSION; - - } else /* peripheral is enabled */ { - if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) - musb->is_active = 1; - } - musb_platform_enable(musb); - musb_writeb(regs, MUSB_DEVCTL, devctl); -} - - -static void musb_generic_disable(struct musb *musb) -{ - void __iomem *mbase = musb->mregs; - u16 temp; - - /* disable interrupts */ - musb_writeb(mbase, MUSB_INTRUSBE, 0); - musb_writew(mbase, MUSB_INTRTXE, 0); - musb_writew(mbase, MUSB_INTRRXE, 0); - - /* off */ - musb_writeb(mbase, MUSB_DEVCTL, 0); - - /* flush pending interrupts */ - temp = musb_readb(mbase, MUSB_INTRUSB); - temp = musb_readw(mbase, MUSB_INTRTX); - temp = musb_readw(mbase, MUSB_INTRRX); - -} - -/* - * Make the HDRC stop (disable interrupts, etc.); - * reversible by musb_start - * called on gadget driver unregister - * with controller locked, irqs blocked - * acts as a NOP unless some role activated the hardware - */ -void musb_stop(struct musb *musb) -{ - /* stop IRQs, timers, ... */ - musb_platform_disable(musb); - musb_generic_disable(musb); - DBG(3, "HDRC disabled\n"); - - /* FIXME - * - mark host and/or peripheral drivers unusable/inactive - * - disable DMA (and enable it in HdrcStart) - * - make sure we can musb_start() after musb_stop(); with - * OTG mode, gadget driver module rmmod/modprobe cycles that - * - ... - */ - musb_platform_try_idle(musb, 0); -} - -static void musb_shutdown(struct platform_device *pdev) -{ - struct musb *musb = dev_to_musb(&pdev->dev); - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); - musb_platform_disable(musb); - musb_generic_disable(musb); - if (musb->clock) { - clk_put(musb->clock); - musb->clock = NULL; - } - spin_unlock_irqrestore(&musb->lock, flags); - - /* FIXME power down */ -} - - -/*-------------------------------------------------------------------------*/ - -/* - * The silicon either has hard-wired endpoint configurations, or else - * "dynamic fifo" sizing. The driver has support for both, though at this - * writing only the dynamic sizing is very well tested. We use normal - * idioms to so both modes are compile-tested, but dead code elimination - * leaves only the relevant one in the object file. - * - * We don't currently use dynamic fifo setup capability to do anything - * more than selecting one of a bunch of predefined configurations. 
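[Editorial sketch] The comment above introduces the fifo_mode tables that follow: with dynamic FIFO sizing, each table entry claims a power-of-two chunk of the shared FIFO RAM (doubled when double-buffered), and the running offset must stay inside 1 << (ram_bits + 2) bytes, which is exactly the check fifo_setup() makes further below. A small stand-alone model of that accounting, assuming ram_bits = 10 (4 KB) and a mode-1-like layout:

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Model of the FIFO RAM accounting done by fifo_setup()/ep_config_from_table()
 * below: round maxpacket up to a power of two, double it when double-buffered,
 * and make sure the running offset fits in the FIFO RAM.
 */
#define RAM_BYTES    (1 << (10 + 2))    /* ram_bits = 10 assumed: 4 KB */

struct cfg { unsigned maxpacket; int double_buffered; };

static int add_fifo(int offset, struct cfg c)
{
    int size = ffs(c.maxpacket < 8 ? 8 : c.maxpacket) - 1;
    int bytes = (1 << size) << (c.double_buffered ? 1 : 0);

    if (offset + bytes > RAM_BYTES)
        return -1;        /* would overrun the FIFO RAM */
    printf("%5d bytes at offset %4d (%s)\n", bytes, offset,
           c.double_buffered ? "double-buffered" : "single");
    return offset + bytes;
}

int main(void)
{
    /* roughly the "mode 1" idea: ep0, double-buffered 512-byte bulk FIFOs
     * on ep1 (tx + rx) and ep2 (one shared FIFO), plus ep3/ep4.
     */
    struct cfg table[] = {
        { 64, 0 },                  /* ep0 */
        { 512, 1 }, { 512, 1 },     /* ep1 tx, ep1 rx */
        { 512, 1 },                 /* ep2 shared rx/tx */
        { 256, 0 }, { 256, 0 },     /* ep3, ep4 */
    };
    int offset = 0;

    for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        offset = add_fifo(offset, table[i]);
        if (offset < 0)
            return 1;
    }
    printf("total: %d of %d bytes used\n", offset, RAM_BYTES);
    return 0;
}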
- */ -#if defined(CONFIG_USB_TUSB6010) || \ - defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) -static ushort __initdata fifo_mode = 4; -#else -static ushort __initdata fifo_mode = 2; -#endif - -/* "modprobe ... fifo_mode=1" etc */ -module_param(fifo_mode, ushort, 0); -MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration"); - - -enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed)); -enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed)); - -struct fifo_cfg { - u8 hw_ep_num; - enum fifo_style style; - enum buf_mode mode; - u16 maxpacket; -}; - -/* - * tables defining fifo_mode values. define more if you like. - * for host side, make sure both halves of ep1 are set up. - */ - -/* mode 0 - fits in 2KB */ -static struct fifo_cfg __initdata mode_0_cfg[] = { -{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, }, -{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, -{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, -}; - -/* mode 1 - fits in 4KB */ -static struct fifo_cfg __initdata mode_1_cfg[] = { -{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, -{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, -{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, }, -{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, -{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, -}; - -/* mode 2 - fits in 4KB */ -static struct fifo_cfg __initdata mode_2_cfg[] = { -{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, -{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, -}; - -/* mode 3 - fits in 4KB */ -static struct fifo_cfg __initdata mode_3_cfg[] = { -{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, -{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, -{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, -{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, -}; - -/* mode 4 - fits in 16KB */ -static struct fifo_cfg __initdata mode_4_cfg[] = { -{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, -{ 
.hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, }, -{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, -{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, -}; - - -/* - * configure a fifo; for non-shared endpoints, this may be called - * once for a tx fifo and once for an rx fifo. - * - * returns negative errno or offset for next fifo. - */ -static int __init -fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep, - const struct fifo_cfg *cfg, u16 offset) -{ - void __iomem *mbase = musb->mregs; - int size = 0; - u16 maxpacket = cfg->maxpacket; - u16 c_off = offset >> 3; - u8 c_size; - - /* expect hw_ep has already been zero-initialized */ - - size = ffs(max(maxpacket, (u16) 8)) - 1; - maxpacket = 1 << size; - - c_size = size - 3; - if (cfg->mode == BUF_DOUBLE) { - if ((offset + (maxpacket << 1)) > - (1 << (musb->config->ram_bits + 2))) - return -EMSGSIZE; - c_size |= MUSB_FIFOSZ_DPB; - } else { - if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2))) - return -EMSGSIZE; - } - - /* configure the FIFO */ - musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum); - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - /* EP0 reserved endpoint for control, bidirectional; - * EP1 reserved for bulk, two unidirection halves. - */ - if (hw_ep->epnum == 1) - musb->bulk_ep = hw_ep; - /* REVISIT error check: be sure ep0 can both rx and tx ... */ -#endif - switch (cfg->style) { - case FIFO_TX: - musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); - musb_writew(mbase, MUSB_TXFIFOADD, c_off); - hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); - hw_ep->max_packet_sz_tx = maxpacket; - break; - case FIFO_RX: - musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); - musb_writew(mbase, MUSB_RXFIFOADD, c_off); - hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); - hw_ep->max_packet_sz_rx = maxpacket; - break; - case FIFO_RXTX: - musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); - musb_writew(mbase, MUSB_TXFIFOADD, c_off); - hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); - hw_ep->max_packet_sz_rx = maxpacket; - - musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); - musb_writew(mbase, MUSB_RXFIFOADD, c_off); - hw_ep->tx_double_buffered = hw_ep->rx_double_buffered; - hw_ep->max_packet_sz_tx = maxpacket; - - hw_ep->is_shared_fifo = true; - break; - } - - /* NOTE rx and tx endpoint irqs aren't managed separately, - * which happens to be ok - */ - musb->epmask |= (1 << hw_ep->epnum); - - return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 
1 : 0)); -} - -static struct fifo_cfg __initdata ep0_cfg = { - .style = FIFO_RXTX, .maxpacket = 64, -}; - -static int __init ep_config_from_table(struct musb *musb) -{ - const struct fifo_cfg *cfg; - unsigned i, n; - int offset; - struct musb_hw_ep *hw_ep = musb->endpoints; - - switch (fifo_mode) { - default: - fifo_mode = 0; - /* FALLTHROUGH */ - case 0: - cfg = mode_0_cfg; - n = ARRAY_SIZE(mode_0_cfg); - break; - case 1: - cfg = mode_1_cfg; - n = ARRAY_SIZE(mode_1_cfg); - break; - case 2: - cfg = mode_2_cfg; - n = ARRAY_SIZE(mode_2_cfg); - break; - case 3: - cfg = mode_3_cfg; - n = ARRAY_SIZE(mode_3_cfg); - break; - case 4: - cfg = mode_4_cfg; - n = ARRAY_SIZE(mode_4_cfg); - break; - } - - printk(KERN_DEBUG "%s: setup fifo_mode %d\n", - musb_driver_name, fifo_mode); - - - offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0); - /* assert(offset > 0) */ - - /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would - * be better than static musb->config->num_eps and DYN_FIFO_SIZE... - */ - - for (i = 0; i < n; i++) { - u8 epn = cfg->hw_ep_num; - - if (epn >= musb->config->num_eps) { - pr_debug("%s: invalid ep %d\n", - musb_driver_name, epn); - continue; - } - offset = fifo_setup(musb, hw_ep + epn, cfg++, offset); - if (offset < 0) { - pr_debug("%s: mem overrun, ep %d\n", - musb_driver_name, epn); - return -EINVAL; - } - epn++; - musb->nr_endpoints = max(epn, musb->nr_endpoints); - } - - printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", - musb_driver_name, - n + 1, musb->config->num_eps * 2 - 1, - offset, (1 << (musb->config->ram_bits + 2))); - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - if (!musb->bulk_ep) { - pr_debug("%s: missing bulk\n", musb_driver_name); - return -EINVAL; - } -#endif - - return 0; -} - - -/* - * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false - * @param musb the controller - */ -static int __init ep_config_from_hw(struct musb *musb) -{ - u8 epnum = 0, reg; - struct musb_hw_ep *hw_ep; - void *mbase = musb->mregs; - - DBG(2, "<== static silicon ep config\n"); - - /* FIXME pick up ep0 maxpacket size */ - - for (epnum = 1; epnum < musb->config->num_eps; epnum++) { - musb_ep_select(mbase, epnum); - hw_ep = musb->endpoints + epnum; - - /* read from core using indexed model */ - reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE); - if (!reg) { - /* 0's returned when no more endpoints */ - break; - } - musb->nr_endpoints++; - musb->epmask |= (1 << epnum); - - hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f); - - /* shared TX/RX FIFO? */ - if ((reg & 0xf0) == 0xf0) { - hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx; - hw_ep->is_shared_fifo = true; - continue; - } else { - hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4); - hw_ep->is_shared_fifo = false; - } - - /* FIXME set up hw_ep->{rx,tx}_double_buffered */ - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - /* pick an RX/TX endpoint for bulk */ - if (hw_ep->max_packet_sz_tx < 512 - || hw_ep->max_packet_sz_rx < 512) - continue; - - /* REVISIT: this algorithm is lazy, we should at least - * try to pick a double buffered endpoint. 
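[Editorial sketch] When the core has static FIFOs instead, the probe routine above decodes each endpoint's FIFOSIZE register: the low nibble is log2 of the TX FIFO size, the high nibble is log2 of the RX FIFO size, 0xF in the high nibble marks one shared TX/RX FIFO, and a value of zero means there are no more endpoints. A stand-alone sketch of that decoding:

#include <stdint.h>
#include <stdio.h>

/* Model of the static-FIFO FIFOSIZE decoding used above: low nibble is
 * log2 of the TX size, high nibble is log2 of the RX size, 0xF in the
 * high nibble means one FIFO shared by both directions.
 */
static void decode_fifosize(uint8_t reg)
{
    unsigned tx = 1u << (reg & 0x0f);

    if ((reg & 0xf0) == 0xf0)
        printf("reg 0x%02x: shared TX/RX FIFO, %u bytes\n", reg, tx);
    else
        printf("reg 0x%02x: TX %u bytes, RX %u bytes\n",
               reg, tx, 1u << ((reg & 0xf0) >> 4));
}

int main(void)
{
    decode_fifosize(0x99);    /* 512-byte TX and RX FIFOs */
    decode_fifosize(0xfa);    /* one shared 1024-byte FIFO */
    return 0;
}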
- */ - if (musb->bulk_ep) - continue; - musb->bulk_ep = hw_ep; -#endif - } - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - if (!musb->bulk_ep) { - pr_debug("%s: missing bulk\n", musb_driver_name); - return -EINVAL; - } -#endif - - return 0; -} - -enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, }; - -/* Initialize MUSB (M)HDRC part of the USB hardware subsystem; - * configure endpoints, or take their config from silicon - */ -static int __init musb_core_init(u16 musb_type, struct musb *musb) -{ -#ifdef MUSB_AHB_ID - u32 data; -#endif - u8 reg; - char *type; - u16 hwvers, rev_major, rev_minor; - char aInfo[78], aRevision[32], aDate[12]; - void __iomem *mbase = musb->mregs; - int status = 0; - int i; - - /* log core options (read using indexed model) */ - musb_ep_select(mbase, 0); - reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); - - strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); - if (reg & MUSB_CONFIGDATA_DYNFIFO) - strcat(aInfo, ", dyn FIFOs"); - if (reg & MUSB_CONFIGDATA_MPRXE) { - strcat(aInfo, ", bulk combine"); -#ifdef C_MP_RX - musb->bulk_combine = true; -#else - strcat(aInfo, " (X)"); /* no driver support */ -#endif - } - if (reg & MUSB_CONFIGDATA_MPTXE) { - strcat(aInfo, ", bulk split"); -#ifdef C_MP_TX - musb->bulk_split = true; -#else - strcat(aInfo, " (X)"); /* no driver support */ -#endif - } - if (reg & MUSB_CONFIGDATA_HBRXE) { - strcat(aInfo, ", HB-ISO Rx"); - strcat(aInfo, " (X)"); /* no driver support */ - } - if (reg & MUSB_CONFIGDATA_HBTXE) { - strcat(aInfo, ", HB-ISO Tx"); - strcat(aInfo, " (X)"); /* no driver support */ - } - if (reg & MUSB_CONFIGDATA_SOFTCONE) - strcat(aInfo, ", SoftConn"); - - printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", - musb_driver_name, reg, aInfo); - -#ifdef MUSB_AHB_ID - data = musb_readl(mbase, 0x404); - sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff), - (data >> 16) & 0xff, (data >> 24) & 0xff); - /* FIXME ID2 and ID3 are unused */ - data = musb_readl(mbase, 0x408); - printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data); - data = musb_readl(mbase, 0x40c); - printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data); - reg = musb_readb(mbase, 0x400); - musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC; -#else - aDate[0] = 0; -#endif - if (MUSB_CONTROLLER_MHDRC == musb_type) { - musb->is_multipoint = 1; - type = "M"; - } else { - musb->is_multipoint = 0; - type = ""; -#ifdef CONFIG_USB_MUSB_HDRC_HCD -#ifndef CONFIG_USB_OTG_BLACKLIST_HUB - printk(KERN_ERR - "%s: kernel must blacklist external hubs\n", - musb_driver_name); -#endif -#endif - } - - /* log release info */ - hwvers = musb_readw(mbase, MUSB_HWVERS); - rev_major = (hwvers >> 10) & 0x1f; - rev_minor = hwvers & 0x3ff; - snprintf(aRevision, 32, "%d.%d%s", rev_major, - rev_minor, (hwvers & 0x8000) ? 
"RC" : ""); - printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", - musb_driver_name, type, aRevision, aDate); - - /* configure ep0 */ - musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; - musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; - - /* discover endpoint configuration */ - musb->nr_endpoints = 1; - musb->epmask = 1; - - if (reg & MUSB_CONFIGDATA_DYNFIFO) { - if (musb->config->dyn_fifo) - status = ep_config_from_table(musb); - else { - ERR("reconfigure software for Dynamic FIFOs\n"); - status = -ENODEV; - } - } else { - if (!musb->config->dyn_fifo) - status = ep_config_from_hw(musb); - else { - ERR("reconfigure software for static FIFOs\n"); - return -ENODEV; - } - } - - if (status < 0) - return status; - - /* finish init, and print endpoint config */ - for (i = 0; i < musb->nr_endpoints; i++) { - struct musb_hw_ep *hw_ep = musb->endpoints + i; - - hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; -#ifdef CONFIG_USB_TUSB6010 - hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); - hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); - hw_ep->fifo_sync_va = - musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i); - - if (i == 0) - hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF; - else - hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2); -#endif - - hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase; -#ifdef CONFIG_USB_MUSB_HDRC_HCD - hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase; - hw_ep->rx_reinit = 1; - hw_ep->tx_reinit = 1; -#endif - - if (hw_ep->max_packet_sz_tx) { - printk(KERN_DEBUG - "%s: hw_ep %d%s, %smax %d\n", - musb_driver_name, i, - hw_ep->is_shared_fifo ? "shared" : "tx", - hw_ep->tx_double_buffered - ? "doublebuffer, " : "", - hw_ep->max_packet_sz_tx); - } - if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { - printk(KERN_DEBUG - "%s: hw_ep %d%s, %smax %d\n", - musb_driver_name, i, - "rx", - hw_ep->rx_double_buffered - ? "doublebuffer, " : "", - hw_ep->max_packet_sz_rx); - } - if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) - DBG(1, "hw_ep %d not configured\n", i); - } - - return 0; -} - -/*-------------------------------------------------------------------------*/ - -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) - -static irqreturn_t generic_interrupt(int irq, void *__hci) -{ - unsigned long flags; - irqreturn_t retval = IRQ_NONE; - struct musb *musb = __hci; - - spin_lock_irqsave(&musb->lock, flags); - - musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); - musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); - musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); - - if (musb->int_usb || musb->int_tx || musb->int_rx) - retval = musb_interrupt(musb); - - spin_unlock_irqrestore(&musb->lock, flags); - - /* REVISIT we sometimes get spurious IRQs on g_ep0 - * not clear why... - */ - if (retval != IRQ_HANDLED) - DBG(5, "spurious?\n"); - - return IRQ_HANDLED; -} - -#else -#define generic_interrupt NULL -#endif - -/* - * handle all the irqs defined by the HDRC core. for now we expect: other - * irq sources (phy, dma, etc) will be handled first, musb->int_* values - * will be assigned, and the irq will already have been acked. - * - * called in irq context with spinlock held, irqs blocked - */ -irqreturn_t musb_interrupt(struct musb *musb) -{ - irqreturn_t retval = IRQ_NONE; - u8 devctl, power; - int ep_num; - u32 reg; - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - power = musb_readb(musb->mregs, MUSB_POWER); - - DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n", - (devctl & MUSB_DEVCTL_HM) ? 
"host" : "peripheral", - musb->int_usb, musb->int_tx, musb->int_rx); - - /* the core can interrupt us for multiple reasons; docs have - * a generic interrupt flowchart to follow - */ - if (musb->int_usb & STAGE0_MASK) - retval |= musb_stage0_irq(musb, musb->int_usb, - devctl, power); - - /* "stage 1" is handling endpoint irqs */ - - /* handle endpoint 0 first */ - if (musb->int_tx & 1) { - if (devctl & MUSB_DEVCTL_HM) - retval |= musb_h_ep0_irq(musb); - else - retval |= musb_g_ep0_irq(musb); - } - - /* RX on endpoints 1-15 */ - reg = musb->int_rx >> 1; - ep_num = 1; - while (reg) { - if (reg & 1) { - /* musb_ep_select(musb->mregs, ep_num); */ - /* REVISIT just retval = ep->rx_irq(...) */ - retval = IRQ_HANDLED; - if (devctl & MUSB_DEVCTL_HM) { - if (is_host_capable()) - musb_host_rx(musb, ep_num); - } else { - if (is_peripheral_capable()) - musb_g_rx(musb, ep_num); - } - } - - reg >>= 1; - ep_num++; - } - - /* TX on endpoints 1-15 */ - reg = musb->int_tx >> 1; - ep_num = 1; - while (reg) { - if (reg & 1) { - /* musb_ep_select(musb->mregs, ep_num); */ - /* REVISIT just retval |= ep->tx_irq(...) */ - retval = IRQ_HANDLED; - if (devctl & MUSB_DEVCTL_HM) { - if (is_host_capable()) - musb_host_tx(musb, ep_num); - } else { - if (is_peripheral_capable()) - musb_g_tx(musb, ep_num); - } - } - reg >>= 1; - ep_num++; - } - - /* finish handling "global" interrupts after handling fifos */ - if (musb->int_usb) - retval |= musb_stage2_irq(musb, - musb->int_usb, devctl, power); - - return retval; -} - - -#ifndef CONFIG_MUSB_PIO_ONLY -static int __initdata use_dma = 1; - -/* "modprobe ... use_dma=0" etc */ -module_param(use_dma, bool, 0); -MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); - -void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) -{ - u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - - /* called with controller lock already held */ - - if (!epnum) { -#ifndef CONFIG_USB_TUSB_OMAP_DMA - if (!is_cppi_enabled()) { - /* endpoint 0 */ - if (devctl & MUSB_DEVCTL_HM) - musb_h_ep0_irq(musb); - else - musb_g_ep0_irq(musb); - } -#endif - } else { - /* endpoints 1..15 */ - if (transmit) { - if (devctl & MUSB_DEVCTL_HM) { - if (is_host_capable()) - musb_host_tx(musb, epnum); - } else { - if (is_peripheral_capable()) - musb_g_tx(musb, epnum); - } - } else { - /* receive */ - if (devctl & MUSB_DEVCTL_HM) { - if (is_host_capable()) - musb_host_rx(musb, epnum); - } else { - if (is_peripheral_capable()) - musb_g_rx(musb, epnum); - } - } - } -} - -#else -#define use_dma 0 -#endif - -/*-------------------------------------------------------------------------*/ - -#ifdef CONFIG_SYSFS - -static ssize_t -musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct musb *musb = dev_to_musb(dev); - unsigned long flags; - int ret = -EINVAL; - - spin_lock_irqsave(&musb->lock, flags); - ret = sprintf(buf, "%s\n", otg_state_string(musb)); - spin_unlock_irqrestore(&musb->lock, flags); - - return ret; -} - -static ssize_t -musb_mode_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t n) -{ - struct musb *musb = dev_to_musb(dev); - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); - if (!strncmp(buf, "host", 4)) - musb_platform_set_mode(musb, MUSB_HOST); - if (!strncmp(buf, "peripheral", 10)) - musb_platform_set_mode(musb, MUSB_PERIPHERAL); - if (!strncmp(buf, "otg", 3)) - musb_platform_set_mode(musb, MUSB_OTG); - spin_unlock_irqrestore(&musb->lock, flags); - - return n; -} -static DEVICE_ATTR(mode, 0644, musb_mode_show, 
musb_mode_store); - -static ssize_t -musb_vbus_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t n) -{ - struct musb *musb = dev_to_musb(dev); - unsigned long flags; - unsigned long val; - - if (sscanf(buf, "%lu", &val) < 1) { - printk(KERN_ERR "Invalid VBUS timeout ms value\n"); - return -EINVAL; - } - - spin_lock_irqsave(&musb->lock, flags); - musb->a_wait_bcon = val; - if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON) - musb->is_active = 0; - musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); - spin_unlock_irqrestore(&musb->lock, flags); - - return n; -} - -static ssize_t -musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct musb *musb = dev_to_musb(dev); - unsigned long flags; - unsigned long val; - int vbus; - - spin_lock_irqsave(&musb->lock, flags); - val = musb->a_wait_bcon; - vbus = musb_platform_get_vbus_status(musb); - spin_unlock_irqrestore(&musb->lock, flags); - - return sprintf(buf, "Vbus %s, timeout %lu\n", - vbus ? "on" : "off", val); -} -static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store); - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - -/* Gadget drivers can't know that a host is connected so they might want - * to start SRP, but users can. This allows userspace to trigger SRP. - */ -static ssize_t -musb_srp_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t n) -{ - struct musb *musb = dev_to_musb(dev); - unsigned short srp; - - if (sscanf(buf, "%hu", &srp) != 1 - || (srp != 1)) { - printk(KERN_ERR "SRP: Value must be 1\n"); - return -EINVAL; - } - - if (srp == 1) - musb_g_wakeup(musb); - - return n; -} -static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); - -#endif /* CONFIG_USB_GADGET_MUSB_HDRC */ - -#endif /* sysfs */ - -/* Only used to provide driver mode change events */ -static void musb_irq_work(struct work_struct *data) -{ - struct musb *musb = container_of(data, struct musb, irq_work); - static int old_state; - - if (musb->xceiv.state != old_state) { - old_state = musb->xceiv.state; - sysfs_notify(&musb->controller->kobj, NULL, "mode"); - } -} - -/* -------------------------------------------------------------------------- - * Init support - */ - -static struct musb *__init -allocate_instance(struct device *dev, - struct musb_hdrc_config *config, void __iomem *mbase) -{ - struct musb *musb; - struct musb_hw_ep *ep; - int epnum; -#ifdef CONFIG_USB_MUSB_HDRC_HCD - struct usb_hcd *hcd; - - hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id); - if (!hcd) - return NULL; - /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ - - musb = hcd_to_musb(hcd); - INIT_LIST_HEAD(&musb->control); - INIT_LIST_HEAD(&musb->in_bulk); - INIT_LIST_HEAD(&musb->out_bulk); - - hcd->uses_new_polling = 1; - - musb->vbuserr_retry = VBUSERR_RETRY_COUNT; -#else - musb = kzalloc(sizeof *musb, GFP_KERNEL); - if (!musb) - return NULL; - dev_set_drvdata(dev, musb); - -#endif - - musb->mregs = mbase; - musb->ctrl_base = mbase; - musb->nIrq = -ENODEV; - musb->config = config; - for (epnum = 0, ep = musb->endpoints; - epnum < musb->config->num_eps; - epnum++, ep++) { - - ep->musb = musb; - ep->epnum = epnum; - } - - musb->controller = dev; - return musb; -} - -static void musb_free(struct musb *musb) -{ - /* this has multiple entry modes. it handles fault cleanup after - * probe(), where things may be partially set up, as well as rmmod - * cleanup after everything's been de-activated. 
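[Editorial sketch] The sysfs attributes above live under the controller's device directory: "mode" reports the current OTG state and accepts host/peripheral/otg, "vbus" reports VBUS state and takes the a_wait_bcon timeout in milliseconds, and "srp" (gadget builds only) takes a 1 to trigger SRP from userspace. A small sketch of driving them from a userspace program; the sysfs path is an assumption and depends on how the platform device is named on a given board.

#include <stdio.h>

/* Hypothetical sysfs directory for the controller; the actual path depends
 * on how the board registers the platform device.
 */
#define MUSB_SYSFS "/sys/devices/platform/musb_hdrc"

static int musb_attr_write(const char *attr, const char *val)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), "%s/%s", MUSB_SYSFS, attr);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fputs(val, f);
    return fclose(f);
}

int main(void)
{
    char path[256], line[64];
    FILE *f;

    snprintf(path, sizeof(path), "%s/mode", MUSB_SYSFS);
    f = fopen(path, "r");
    if (f) {
        if (fgets(line, sizeof(line), f))
            printf("current OTG state: %s", line);
        fclose(f);
    }

    musb_attr_write("vbus", "5000");    /* a_wait_bcon timeout: 5000 ms */
    musb_attr_write("srp", "1");        /* gadget builds only: trigger SRP */
    return 0;
}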
- */ - -#ifdef CONFIG_SYSFS - device_remove_file(musb->controller, &dev_attr_mode); - device_remove_file(musb->controller, &dev_attr_vbus); -#ifdef CONFIG_USB_MUSB_OTG - device_remove_file(musb->controller, &dev_attr_srp); -#endif -#endif - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - musb_gadget_cleanup(musb); -#endif - - if (musb->nIrq >= 0) { - disable_irq_wake(musb->nIrq); - free_irq(musb->nIrq, musb); - } - if (is_dma_capable() && musb->dma_controller) { - struct dma_controller *c = musb->dma_controller; - - (void) c->stop(c); - dma_controller_destroy(c); - } - - musb_writeb(musb->mregs, MUSB_DEVCTL, 0); - musb_platform_exit(musb); - musb_writeb(musb->mregs, MUSB_DEVCTL, 0); - - if (musb->clock) { - clk_disable(musb->clock); - clk_put(musb->clock); - } - -#ifdef CONFIG_USB_MUSB_OTG - put_device(musb->xceiv.dev); -#endif - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - usb_put_hcd(musb_to_hcd(musb)); -#else - kfree(musb); -#endif -} - -/* - * Perform generic per-controller initialization. - * - * @pDevice: the controller (already clocked, etc) - * @nIrq: irq - * @mregs: virtual address of controller registers, - * not yet corrected for platform-specific offsets - */ -static int __init -musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) -{ - int status; - struct musb *musb; - struct musb_hdrc_platform_data *plat = dev->platform_data; - - /* The driver might handle more features than the board; OK. - * Fail when the board needs a feature that's not enabled. - */ - if (!plat) { - dev_dbg(dev, "no platform_data?\n"); - return -ENODEV; - } - switch (plat->mode) { - case MUSB_HOST: -#ifdef CONFIG_USB_MUSB_HDRC_HCD - break; -#else - goto bad_config; -#endif - case MUSB_PERIPHERAL: -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - break; -#else - goto bad_config; -#endif - case MUSB_OTG: -#ifdef CONFIG_USB_MUSB_OTG - break; -#else -bad_config: -#endif - default: - dev_err(dev, "incompatible Kconfig role setting\n"); - return -EINVAL; - } - - /* allocate */ - musb = allocate_instance(dev, plat->config, ctrl); - if (!musb) - return -ENOMEM; - - spin_lock_init(&musb->lock); - musb->board_mode = plat->mode; - musb->board_set_power = plat->set_power; - musb->set_clock = plat->set_clock; - musb->min_power = plat->min_power; - - /* Clock usage is chip-specific ... functional clock (DaVinci, - * OMAP2430), or PHY ref (some TUSB6010 boards). All this core - * code does is make sure a clock handle is available; platform - * code manages it during start/stop and suspend/resume. - */ - if (plat->clock) { - musb->clock = clk_get(dev, plat->clock); - if (IS_ERR(musb->clock)) { - status = PTR_ERR(musb->clock); - musb->clock = NULL; - goto fail; - } - } - - /* assume vbus is off */ - - /* platform adjusts musb->mregs and musb->isr if needed, - * and activates clocks - */ - musb->isr = generic_interrupt; - status = musb_platform_init(musb); - - if (status < 0) - goto fail; - if (!musb->isr) { - status = -ENODEV; - goto fail2; - } - -#ifndef CONFIG_MUSB_PIO_ONLY - if (use_dma && dev->dma_mask) { - struct dma_controller *c; - - c = dma_controller_create(musb, musb->mregs); - musb->dma_controller = c; - if (c) - (void) c->start(c); - } -#endif - /* ideally this would be abstracted in platform setup */ - if (!is_dma_capable() || !musb->dma_controller) - dev->dma_mask = NULL; - - /* be sure interrupts are disabled before connecting ISR */ - musb_platform_disable(musb); - musb_generic_disable(musb); - - /* setup musb parts of the core (especially endpoints) */ - status = musb_core_init(plat->config->multipoint - ? 
MUSB_CONTROLLER_MHDRC - : MUSB_CONTROLLER_HDRC, musb); - if (status < 0) - goto fail2; - - /* Init IRQ workqueue before request_irq */ - INIT_WORK(&musb->irq_work, musb_irq_work); - - /* attach to the IRQ */ - if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) { - dev_err(dev, "request_irq %d failed!\n", nIrq); - status = -ENODEV; - goto fail2; - } - musb->nIrq = nIrq; -/* FIXME this handles wakeup irqs wrong */ - if (enable_irq_wake(nIrq) == 0) - device_init_wakeup(dev, 1); - - pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n", - musb_driver_name, - ({char *s; - switch (musb->board_mode) { - case MUSB_HOST: s = "Host"; break; - case MUSB_PERIPHERAL: s = "Peripheral"; break; - default: s = "OTG"; break; - }; s; }), - ctrl, - (is_dma_capable() && musb->dma_controller) - ? "DMA" : "PIO", - musb->nIrq); - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - /* host side needs more setup, except for no-host modes */ - if (musb->board_mode != MUSB_PERIPHERAL) { - struct usb_hcd *hcd = musb_to_hcd(musb); - - if (musb->board_mode == MUSB_OTG) - hcd->self.otg_port = 1; - musb->xceiv.host = &hcd->self; - hcd->power_budget = 2 * (plat->power ? : 250); - } -#endif /* CONFIG_USB_MUSB_HDRC_HCD */ - - /* For the host-only role, we can activate right away. - * (We expect the ID pin to be forcibly grounded!!) - * Otherwise, wait till the gadget driver hooks up. - */ - if (!is_otg_enabled(musb) && is_host_enabled(musb)) { - MUSB_HST_MODE(musb); - musb->xceiv.default_a = 1; - musb->xceiv.state = OTG_STATE_A_IDLE; - - status = usb_add_hcd(musb_to_hcd(musb), -1, 0); - - DBG(1, "%s mode, status %d, devctl %02x %c\n", - "HOST", status, - musb_readb(musb->mregs, MUSB_DEVCTL), - (musb_readb(musb->mregs, MUSB_DEVCTL) - & MUSB_DEVCTL_BDEVICE - ? 'B' : 'A')); - - } else /* peripheral is enabled */ { - MUSB_DEV_MODE(musb); - musb->xceiv.default_a = 0; - musb->xceiv.state = OTG_STATE_B_IDLE; - - status = musb_gadget_setup(musb); - - DBG(1, "%s mode, status %d, dev%02x\n", - is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", - status, - musb_readb(musb->mregs, MUSB_DEVCTL)); - - } - - if (status == 0) - musb_debug_create("driver/musb_hdrc", musb); - else { -fail: - if (musb->clock) - clk_put(musb->clock); - device_init_wakeup(dev, 0); - musb_free(musb); - return status; - } - -#ifdef CONFIG_SYSFS - status = device_create_file(dev, &dev_attr_mode); - status = device_create_file(dev, &dev_attr_vbus); -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - status = device_create_file(dev, &dev_attr_srp); -#endif /* CONFIG_USB_GADGET_MUSB_HDRC */ - status = 0; -#endif - - return status; - -fail2: - musb_platform_exit(musb); - goto fail; -} - -/*-------------------------------------------------------------------------*/ - -/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just - * bridge to a platform device; this driver then suffices. 
- */ - -#ifndef CONFIG_MUSB_PIO_ONLY -static u64 *orig_dma_mask; -#endif - -static int __init musb_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - int irq = platform_get_irq(pdev, 0); - struct resource *iomem; - void __iomem *base; - - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iomem || irq == 0) - return -ENODEV; - - base = ioremap(iomem->start, iomem->end - iomem->start + 1); - if (!base) { - dev_err(dev, "ioremap failed\n"); - return -ENOMEM; - } - -#ifndef CONFIG_MUSB_PIO_ONLY - /* clobbered by use_dma=n */ - orig_dma_mask = dev->dma_mask; -#endif - return musb_init_controller(dev, irq, base); -} - -static int __devexit musb_remove(struct platform_device *pdev) -{ - struct musb *musb = dev_to_musb(&pdev->dev); - void __iomem *ctrl_base = musb->ctrl_base; - - /* this gets called on rmmod. - * - Host mode: host may still be active - * - Peripheral mode: peripheral is deactivated (or never-activated) - * - OTG mode: both roles are deactivated (or never-activated) - */ - musb_shutdown(pdev); - musb_debug_delete("driver/musb_hdrc", musb); -#ifdef CONFIG_USB_MUSB_HDRC_HCD - if (musb->board_mode == MUSB_HOST) - usb_remove_hcd(musb_to_hcd(musb)); -#endif - musb_free(musb); - iounmap(ctrl_base); - device_init_wakeup(&pdev->dev, 0); -#ifndef CONFIG_MUSB_PIO_ONLY - pdev->dev.dma_mask = orig_dma_mask; -#endif - return 0; -} - -#ifdef CONFIG_PM - -static int musb_suspend(struct platform_device *pdev, pm_message_t message) -{ - unsigned long flags; - struct musb *musb = dev_to_musb(&pdev->dev); - - if (!musb->clock) - return 0; - - spin_lock_irqsave(&musb->lock, flags); - - if (is_peripheral_active(musb)) { - /* FIXME force disconnect unless we know USB will wake - * the system up quickly enough to respond ... - */ - } else if (is_host_active(musb)) { - /* we know all the children are suspended; sometimes - * they will even be wakeup-enabled. - */ - } - - if (musb->set_clock) - musb->set_clock(musb->clock, 0); - else - clk_disable(musb->clock); - spin_unlock_irqrestore(&musb->lock, flags); - return 0; -} - -static int musb_resume(struct platform_device *pdev) -{ - unsigned long flags; - struct musb *musb = dev_to_musb(&pdev->dev); - - if (!musb->clock) - return 0; - - spin_lock_irqsave(&musb->lock, flags); - - if (musb->set_clock) - musb->set_clock(musb->clock, 1); - else - clk_enable(musb->clock); - - /* for static cmos like DaVinci, register values were preserved - * unless for some reason the whole soc powered down and we're - * not treating that as a whole-system restart (e.g. swsusp) - */ - spin_unlock_irqrestore(&musb->lock, flags); - return 0; -} - -#else -#define musb_suspend NULL -#define musb_resume NULL -#endif - -static struct platform_driver musb_driver = { - .driver = { - .name = (char *)musb_driver_name, - .bus = &platform_bus_type, - .owner = THIS_MODULE, - }, - .remove = __devexit_p(musb_remove), - .shutdown = musb_shutdown, - .suspend = musb_suspend, - .resume = musb_resume, -}; - -/*-------------------------------------------------------------------------*/ - -static int __init musb_init(void) -{ -#ifdef CONFIG_USB_MUSB_HDRC_HCD - if (usb_disabled()) - return 0; -#endif - - pr_info("%s: version " MUSB_VERSION ", " -#ifdef CONFIG_MUSB_PIO_ONLY - "pio" -#elif defined(CONFIG_USB_TI_CPPI_DMA) - "cppi-dma" -#elif defined(CONFIG_USB_INVENTRA_DMA) - "musb-dma" -#elif defined(CONFIG_USB_TUSB_OMAP_DMA) - "tusb-omap-dma" -#else - "?dma?" 
-#endif - ", " -#ifdef CONFIG_USB_MUSB_OTG - "otg (peripheral+host)" -#elif defined(CONFIG_USB_GADGET_MUSB_HDRC) - "peripheral" -#elif defined(CONFIG_USB_MUSB_HDRC_HCD) - "host" -#endif - ", debug=%d\n", - musb_driver_name, debug); - return platform_driver_probe(&musb_driver, musb_probe); -} - -/* make us init after usbcore and before usb - * gadget and host-side drivers start to register - */ -subsys_initcall(musb_init); - -static void __exit musb_cleanup(void) -{ - platform_driver_unregister(&musb_driver); -} -module_exit(musb_cleanup); diff --git a/trunk/drivers/usb/musb/musb_core.h b/trunk/drivers/usb/musb/musb_core.h deleted file mode 100644 index eade46d81708..000000000000 --- a/trunk/drivers/usb/musb/musb_core.h +++ /dev/null @@ -1,507 +0,0 @@ -/* - * MUSB OTG driver defines - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __MUSB_CORE_H__ -#define __MUSB_CORE_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct musb; -struct musb_hw_ep; -struct musb_ep; - - -#include "musb_debug.h" -#include "musb_dma.h" - -#include "musb_io.h" -#include "musb_regs.h" - -#include "musb_gadget.h" -#include "../core/hcd.h" -#include "musb_host.h" - - - -#ifdef CONFIG_USB_MUSB_OTG - -#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST) -#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL) -#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG) - -/* NOTE: otg and peripheral-only state machines start at B_IDLE. - * OTG or host-only go to A_IDLE when ID is sensed. 
- */ -#define is_peripheral_active(m) (!(m)->is_host) -#define is_host_active(m) ((m)->is_host) - -#else -#define is_peripheral_enabled(musb) is_peripheral_capable() -#define is_host_enabled(musb) is_host_capable() -#define is_otg_enabled(musb) 0 - -#define is_peripheral_active(musb) is_peripheral_capable() -#define is_host_active(musb) is_host_capable() -#endif - -#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL) -/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always - * override that choice selection (often USB_GADGET_DUMMY_HCD). - */ -#ifndef CONFIG_USB_GADGET_MUSB_HDRC -#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC -#endif -#endif /* need MUSB gadget selection */ - - -#ifdef CONFIG_PROC_FS -#include -#define MUSB_CONFIG_PROC_FS -#endif - -/****************************** PERIPHERAL ROLE *****************************/ - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - -#define is_peripheral_capable() (1) - -extern irqreturn_t musb_g_ep0_irq(struct musb *); -extern void musb_g_tx(struct musb *, u8); -extern void musb_g_rx(struct musb *, u8); -extern void musb_g_reset(struct musb *); -extern void musb_g_suspend(struct musb *); -extern void musb_g_resume(struct musb *); -extern void musb_g_wakeup(struct musb *); -extern void musb_g_disconnect(struct musb *); - -#else - -#define is_peripheral_capable() (0) - -static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; } -static inline void musb_g_reset(struct musb *m) {} -static inline void musb_g_suspend(struct musb *m) {} -static inline void musb_g_resume(struct musb *m) {} -static inline void musb_g_wakeup(struct musb *m) {} -static inline void musb_g_disconnect(struct musb *m) {} - -#endif - -/****************************** HOST ROLE ***********************************/ - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - -#define is_host_capable() (1) - -extern irqreturn_t musb_h_ep0_irq(struct musb *); -extern void musb_host_tx(struct musb *, u8); -extern void musb_host_rx(struct musb *, u8); - -#else - -#define is_host_capable() (0) - -static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; } -static inline void musb_host_tx(struct musb *m, u8 e) {} -static inline void musb_host_rx(struct musb *m, u8 e) {} - -#endif - - -/****************************** CONSTANTS ********************************/ - -#ifndef MUSB_C_NUM_EPS -#define MUSB_C_NUM_EPS ((u8)16) -#endif - -#ifndef MUSB_MAX_END0_PACKET -#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE) -#endif - -/* host side ep0 states */ -enum musb_h_ep0_state { - MUSB_EP0_IDLE, - MUSB_EP0_START, /* expect ack of setup */ - MUSB_EP0_IN, /* expect IN DATA */ - MUSB_EP0_OUT, /* expect ack of OUT DATA */ - MUSB_EP0_STATUS, /* expect ack of STATUS */ -} __attribute__ ((packed)); - -/* peripheral side ep0 states */ -enum musb_g_ep0_state { - MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */ - MUSB_EP0_STAGE_TX, /* IN data */ - MUSB_EP0_STAGE_RX, /* OUT data */ - MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */ - MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */ - MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */ -} __attribute__ ((packed)); - -/* OTG protocol constants */ -#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */ -#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */ -#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */ - -/*************************** REGISTER ACCESS ********************************/ - -/* Endpoint registers (other than dynfifo setup) can be accessed either - * directly with the "flat" 
model, or after setting up an index register. - */ - -#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \ - || defined(CONFIG_ARCH_OMAP3430) -/* REVISIT indexed access seemed to - * misbehave (on DaVinci) for at least peripheral IN ... - */ -#define MUSB_FLAT_REG -#endif - -/* TUSB mapping: "flat" plus ep0 special cases */ -#if defined(CONFIG_USB_TUSB6010) -#define musb_ep_select(_mbase, _epnum) \ - musb_writeb((_mbase), MUSB_INDEX, (_epnum)) -#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET - -/* "flat" mapping: each endpoint has its own i/o address */ -#elif defined(MUSB_FLAT_REG) -#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum))) -#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET - -/* "indexed" mapping: INDEX register controls register bank select */ -#else -#define musb_ep_select(_mbase, _epnum) \ - musb_writeb((_mbase), MUSB_INDEX, (_epnum)) -#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET -#endif - -/****************************** FUNCTIONS ********************************/ - -#define MUSB_HST_MODE(_musb)\ - { (_musb)->is_host = true; } -#define MUSB_DEV_MODE(_musb) \ - { (_musb)->is_host = false; } - -#define test_devctl_hst_mode(_x) \ - (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM) - -#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral") - -/******************************** TYPES *************************************/ - -/* - * struct musb_hw_ep - endpoint hardware (bidirectional) - * - * Ordered slightly for better cacheline locality. - */ -struct musb_hw_ep { - struct musb *musb; - void __iomem *fifo; - void __iomem *regs; - -#ifdef CONFIG_USB_TUSB6010 - void __iomem *conf; -#endif - - /* index in musb->endpoints[] */ - u8 epnum; - - /* hardware configuration, possibly dynamic */ - bool is_shared_fifo; - bool tx_double_buffered; - bool rx_double_buffered; - u16 max_packet_sz_tx; - u16 max_packet_sz_rx; - - struct dma_channel *tx_channel; - struct dma_channel *rx_channel; - -#ifdef CONFIG_USB_TUSB6010 - /* TUSB has "asynchronous" and "synchronous" dma modes */ - dma_addr_t fifo_async; - dma_addr_t fifo_sync; - void __iomem *fifo_sync_va; -#endif - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - void __iomem *target_regs; - - /* currently scheduled peripheral endpoint */ - struct musb_qh *in_qh; - struct musb_qh *out_qh; - - u8 rx_reinit; - u8 tx_reinit; -#endif - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - /* peripheral side */ - struct musb_ep ep_in; /* TX */ - struct musb_ep ep_out; /* RX */ -#endif -}; - -static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep) -{ -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - return next_request(&hw_ep->ep_in); -#else - return NULL; -#endif -} - -static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep) -{ -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - return next_request(&hw_ep->ep_out); -#else - return NULL; -#endif -} - -/* - * struct musb - Driver instance data. - */ -struct musb { - /* device lock */ - spinlock_t lock; - struct clk *clock; - irqreturn_t (*isr)(int, void *); - struct work_struct irq_work; - -/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ -#define MUSB_PORT_STAT_RESUME (1 << 31) - - u32 port1_status; - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - unsigned long rh_timer; - - enum musb_h_ep0_state ep0_stage; - - /* bulk traffic normally dedicates endpoint hardware, and each - * direction has its own ring of host side endpoints. 
- * we try to progress the transfer at the head of each endpoint's
- * queue until it completes or NAKs too much; then we try the next
- * endpoint.
- */
- struct musb_hw_ep *bulk_ep;
-
- struct list_head control; /* of musb_qh */
- struct list_head in_bulk; /* of musb_qh */
- struct list_head out_bulk; /* of musb_qh */
- struct musb_qh *periodic[32]; /* tree of interrupt+iso */
-#endif
-
- /* called with IRQs blocked; ON/nonzero implies starting a session,
- * and waiting at least a_wait_vrise_tmout.
- */
- void (*board_set_vbus)(struct musb *, int is_on);
-
- struct dma_controller *dma_controller;
-
- struct device *controller;
- void __iomem *ctrl_base;
- void __iomem *mregs;
-
-#ifdef CONFIG_USB_TUSB6010
- dma_addr_t async;
- dma_addr_t sync;
- void __iomem *sync_va;
-#endif
-
- /* passed down from chip/board specific irq handlers */
- u8 int_usb;
- u16 int_rx;
- u16 int_tx;
-
- struct otg_transceiver xceiv;
-
- int nIrq;
-
- struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
-#define control_ep endpoints
-
-#define VBUSERR_RETRY_COUNT 3
- u16 vbuserr_retry;
- u16 epmask;
- u8 nr_endpoints;
-
- u8 board_mode; /* enum musb_mode */
- int (*board_set_power)(int state);
-
- int (*set_clock)(struct clk *clk, int is_active);
-
- u8 min_power; /* vbus for periph, in mA/2 */
-
- bool is_host;
-
- int a_wait_bcon; /* VBUS timeout in msecs */
- unsigned long idle_timeout; /* Next timeout in jiffies */
-
- /* active means connected and not suspended */
- unsigned is_active:1;
-
- unsigned is_multipoint:1;
- unsigned ignore_disconnect:1; /* during bus resets */
-
-#ifdef C_MP_TX
- unsigned bulk_split:1;
-#define can_bulk_split(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#else
-#define can_bulk_split(musb, type) 0
-#endif
-
-#ifdef C_MP_RX
- unsigned bulk_combine:1;
-#define can_bulk_combine(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
-#else
-#define can_bulk_combine(musb, type) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- /* is_suspended means USB B_PERIPHERAL suspend */
- unsigned is_suspended:1;
-
- /* may_wakeup means remote wakeup is enabled */
- unsigned may_wakeup:1;
-
- /* is_self_powered is reported in device status and the
- * config descriptor. is_bus_powered means B_PERIPHERAL
- * draws some VBUS current; both can be true.
- */ - unsigned is_self_powered:1; - unsigned is_bus_powered:1; - - unsigned set_address:1; - unsigned test_mode:1; - unsigned softconnect:1; - - u8 address; - u8 test_mode_nr; - u16 ackpend; /* ep0 */ - enum musb_g_ep0_state ep0_state; - struct usb_gadget g; /* the gadget */ - struct usb_gadget_driver *gadget_driver; /* its driver */ -#endif - - struct musb_hdrc_config *config; - -#ifdef MUSB_CONFIG_PROC_FS - struct proc_dir_entry *proc_entry; -#endif -}; - -static inline void musb_set_vbus(struct musb *musb, int is_on) -{ - musb->board_set_vbus(musb, is_on); -} - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC -static inline struct musb *gadget_to_musb(struct usb_gadget *g) -{ - return container_of(g, struct musb, g); -} -#endif - - -/***************************** Glue it together *****************************/ - -extern const char musb_driver_name[]; - -extern void musb_start(struct musb *musb); -extern void musb_stop(struct musb *musb); - -extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); -extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); - -extern void musb_load_testpacket(struct musb *); - -extern irqreturn_t musb_interrupt(struct musb *); - -extern void musb_platform_enable(struct musb *musb); -extern void musb_platform_disable(struct musb *musb); - -extern void musb_hnp_stop(struct musb *musb); - -extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode); - -#if defined(CONFIG_USB_TUSB6010) || \ - defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) -extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout); -#else -#define musb_platform_try_idle(x, y) do {} while (0) -#endif - -#ifdef CONFIG_USB_TUSB6010 -extern int musb_platform_get_vbus_status(struct musb *musb); -#else -#define musb_platform_get_vbus_status(x) 0 -#endif - -extern int __init musb_platform_init(struct musb *musb); -extern int musb_platform_exit(struct musb *musb); - -/*-------------------------- ProcFS definitions ---------------------*/ - -struct proc_dir_entry; - -#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS) -extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data); -extern void musb_debug_delete(char *name, struct musb *data); - -#else -static inline struct proc_dir_entry * -musb_debug_create(char *name, struct musb *data) -{ - return NULL; -} -static inline void musb_debug_delete(char *name, struct musb *data) -{ -} -#endif - -#endif /* __MUSB_CORE_H__ */ diff --git a/trunk/drivers/usb/musb/musb_debug.h b/trunk/drivers/usb/musb/musb_debug.h deleted file mode 100644 index 3bdb311e820d..000000000000 --- a/trunk/drivers/usb/musb/musb_debug.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * MUSB OTG driver debug defines - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __MUSB_LINUX_DEBUG_H__ -#define __MUSB_LINUX_DEBUG_H__ - -#define yprintk(facility, format, args...) \ - do { printk(facility "%s %d: " format , \ - __func__, __LINE__ , ## args); } while (0) -#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args) -#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) -#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) - -#define xprintk(level, facility, format, args...) do { \ - if (_dbg_level(level)) { \ - printk(facility "%s %d: " format , \ - __func__, __LINE__ , ## args); \ - } } while (0) - -#if MUSB_DEBUG > 0 -extern unsigned debug; -#else -#define debug 0 -#endif - -static inline int _dbg_level(unsigned l) -{ - return debug >= l; -} - -#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args) - -extern const char *otg_state_string(struct musb *); - -#endif /* __MUSB_LINUX_DEBUG_H__ */ diff --git a/trunk/drivers/usb/musb/musb_dma.h b/trunk/drivers/usb/musb/musb_dma.h deleted file mode 100644 index 0a2c4e3602c1..000000000000 --- a/trunk/drivers/usb/musb/musb_dma.h +++ /dev/null @@ -1,172 +0,0 @@ -/* - * MUSB OTG driver DMA controller abstraction - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __MUSB_DMA_H__ -#define __MUSB_DMA_H__ - -struct musb_hw_ep; - -/* - * DMA Controller Abstraction - * - * DMA Controllers are abstracted to allow use of a variety of different - * implementations of DMA, as allowed by the Inventra USB cores. On the - * host side, usbcore sets up the DMA mappings and flushes caches; on the - * peripheral side, the gadget controller driver does. Responsibilities - * of a DMA controller driver include: - * - * - Handling the details of moving multiple USB packets - * in cooperation with the Inventra USB core, including especially - * the correct RX side treatment of short packets and buffer-full - * states (both of which terminate transfers). - * - * - Knowing the correlation between dma channels and the - * Inventra core's local endpoint resources and data direction. - * - * - Maintaining a list of allocated/available channels. - * - * - Updating channel status on interrupts, - * whether shared with the Inventra core or separate. - */ - -#define DMA_ADDR_INVALID (~(dma_addr_t)0) - -#ifndef CONFIG_MUSB_PIO_ONLY -#define is_dma_capable() (1) -#else -#define is_dma_capable() (0) -#endif - -#ifdef CONFIG_USB_TI_CPPI_DMA -#define is_cppi_enabled() 1 -#else -#define is_cppi_enabled() 0 -#endif - -#ifdef CONFIG_USB_TUSB_OMAP_DMA -#define tusb_dma_omap() 1 -#else -#define tusb_dma_omap() 0 -#endif - -/* - * DMA channel status ... updated by the dma controller driver whenever that - * status changes, and protected by the overall controller spinlock. - */ -enum dma_channel_status { - /* unallocated */ - MUSB_DMA_STATUS_UNKNOWN, - /* allocated ... but not busy, no errors */ - MUSB_DMA_STATUS_FREE, - /* busy ... transactions are active */ - MUSB_DMA_STATUS_BUSY, - /* transaction(s) aborted due to ... dma or memory bus error */ - MUSB_DMA_STATUS_BUS_ABORT, - /* transaction(s) aborted due to ... core error or USB fault */ - MUSB_DMA_STATUS_CORE_ABORT -}; - -struct dma_controller; - -/** - * struct dma_channel - A DMA channel. - * @private_data: channel-private data - * @max_len: the maximum number of bytes the channel can move in one - * transaction (typically representing many USB maximum-sized packets) - * @actual_len: how many bytes have been transferred - * @status: current channel status (updated e.g. on interrupt) - * @desired_mode: true if mode 1 is desired; false if mode 0 is desired - * - * channels are associated with an endpoint for the duration of at least - * one usb transfer. - */ -struct dma_channel { - void *private_data; - /* FIXME not void* private_data, but a dma_controller * */ - size_t max_len; - size_t actual_len; - enum dma_channel_status status; - bool desired_mode; -}; - -/* - * dma_channel_status - return status of dma channel - * @c: the channel - * - * Returns the software's view of the channel status. If that status is BUSY - * then it's possible that the hardware has completed (or aborted) a transfer, - * so the driver needs to update that status. 
- */ -static inline enum dma_channel_status -dma_channel_status(struct dma_channel *c) -{ - return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN; -} - -/** - * struct dma_controller - A DMA Controller. - * @start: call this to start a DMA controller; - * return 0 on success, else negative errno - * @stop: call this to stop a DMA controller - * return 0 on success, else negative errno - * @channel_alloc: call this to allocate a DMA channel - * @channel_release: call this to release a DMA channel - * @channel_abort: call this to abort a pending DMA transaction, - * returning it to FREE (but allocated) state - * - * Controllers manage dma channels. - */ -struct dma_controller { - int (*start)(struct dma_controller *); - int (*stop)(struct dma_controller *); - struct dma_channel *(*channel_alloc)(struct dma_controller *, - struct musb_hw_ep *, u8 is_tx); - void (*channel_release)(struct dma_channel *); - int (*channel_program)(struct dma_channel *channel, - u16 maxpacket, u8 mode, - dma_addr_t dma_addr, - u32 length); - int (*channel_abort)(struct dma_channel *); -}; - -/* called after channel_program(), may indicate a fault */ -extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit); - - -extern struct dma_controller *__init -dma_controller_create(struct musb *, void __iomem *); - -extern void dma_controller_destroy(struct dma_controller *); - -#endif /* __MUSB_DMA_H__ */ diff --git a/trunk/drivers/usb/musb/musb_gadget.c b/trunk/drivers/usb/musb/musb_gadget.c deleted file mode 100644 index d6a802c224fa..000000000000 --- a/trunk/drivers/usb/musb/musb_gadget.c +++ /dev/null @@ -1,2031 +0,0 @@ -/* - * MUSB OTG driver peripheral support - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "musb_core.h" - - -/* MUSB PERIPHERAL status 3-mar-2006: - * - * - EP0 seems solid. It passes both USBCV and usbtest control cases. 
- * Minor glitches:
- *
- * + remote wakeup to Linux hosts work, but saw USBCV failures;
- * in one test run (operator error?)
- * + endpoint halt tests -- in both usbtest and usbcv -- seem
- * to break when dma is enabled ... is something wrongly
- * clearing SENDSTALL?
- *
- * - Mass storage behaved ok when last tested. Network traffic patterns
- * (with lots of short transfers etc) need retesting; they turn up the
- * worst cases of the DMA, since short packets are typical but are not
- * required.
- *
- * - TX/IN
- * + both pio and dma behave in with network and g_zero tests
- * + no cppi throughput issues other than no-hw-queueing
- * + failed with FLAT_REG (DaVinci)
- * + seems to behave with double buffering, PIO -and- CPPI
- * + with gadgetfs + AIO, requests got lost?
- *
- * - RX/OUT
- * + both pio and dma behave in with network and g_zero tests
- * + dma is slow in typical case (short_not_ok is clear)
- * + double buffering ok with PIO
- * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
- * + request lossage observed with gadgetfs
- *
- * - ISO not tested ... might work, but only weakly isochronous
- *
- * - Gadget driver disabling of softconnect during bind() is ignored; so
- * drivers can't hold off host requests until userspace is ready.
- * (Workaround: they can turn it off later.)
- *
- * - PORTABILITY (assumes PIO works):
- * + DaVinci, basically works with cppi dma
- * + OMAP 2430, ditto with mentor dma
- * + TUSB 6010, platform-specific dma in the works
- */
-
-/* ----------------------------------------------------------------------- */
-
-/*
- * Immediately complete a request.
- *
- * @param request the request to complete
- * @param status the status to complete the request with
- * Context: controller locked, IRQs blocked.
- */
-void musb_g_giveback(
- struct musb_ep *ep,
- struct usb_request *request,
- int status)
-__releases(ep->musb->lock)
-__acquires(ep->musb->lock)
-{
- struct musb_request *req;
- struct musb *musb;
- int busy = ep->busy;
-
- req = to_musb_request(request);
-
- list_del(&request->list);
- if (req->request.status == -EINPROGRESS)
- req->request.status = status;
- musb = req->musb;
-
- ep->busy = 1;
- spin_unlock(&musb->lock);
- if (is_dma_capable()) {
- if (req->mapped) {
- dma_unmap_single(musb->controller,
- req->request.dma,
- req->request.length,
- req->tx
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- req->request.dma = DMA_ADDR_INVALID;
- req->mapped = 0;
- } else if (req->request.dma != DMA_ADDR_INVALID)
- dma_sync_single_for_cpu(musb->controller,
- req->request.dma,
- req->request.length,
- req->tx
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
- if (request->status == 0)
- DBG(5, "%s done request %p, %d/%d\n",
- ep->end_point.name, request,
- req->request.actual, req->request.length);
- else
- DBG(2, "%s request %p, %d/%d fault %d\n",
- ep->end_point.name, request,
- req->request.actual, req->request.length,
- request->status);
- req->request.complete(&req->ep->end_point, &req->request);
- spin_lock(&musb->lock);
- ep->busy = busy;
-}
-
-/* ----------------------------------------------------------------------- */
-
-/*
- * Abort requests queued to an endpoint using the status. Synchronous.
- * caller locked controller and blocked irqs, and selected this ep.
- */ -static void nuke(struct musb_ep *ep, const int status) -{ - struct musb_request *req = NULL; - void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; - - ep->busy = 1; - - if (is_dma_capable() && ep->dma) { - struct dma_controller *c = ep->musb->dma_controller; - int value; - if (ep->is_in) { - musb_writew(epio, MUSB_TXCSR, - 0 | MUSB_TXCSR_FLUSHFIFO); - musb_writew(epio, MUSB_TXCSR, - 0 | MUSB_TXCSR_FLUSHFIFO); - } else { - musb_writew(epio, MUSB_RXCSR, - 0 | MUSB_RXCSR_FLUSHFIFO); - musb_writew(epio, MUSB_RXCSR, - 0 | MUSB_RXCSR_FLUSHFIFO); - } - - value = c->channel_abort(ep->dma); - DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value); - c->channel_release(ep->dma); - ep->dma = NULL; - } - - while (!list_empty(&(ep->req_list))) { - req = container_of(ep->req_list.next, struct musb_request, - request.list); - musb_g_giveback(ep, &req->request, status); - } -} - -/* ----------------------------------------------------------------------- */ - -/* Data transfers - pure PIO, pure DMA, or mixed mode */ - -/* - * This assumes the separate CPPI engine is responding to DMA requests - * from the usb core ... sequenced a bit differently from mentor dma. - */ - -static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) -{ - if (can_bulk_split(musb, ep->type)) - return ep->hw_ep->max_packet_sz_tx; - else - return ep->packet_sz; -} - - -#ifdef CONFIG_USB_INVENTRA_DMA - -/* Peripheral tx (IN) using Mentor DMA works as follows: - Only mode 0 is used for transfers <= wPktSize, - mode 1 is used for larger transfers, - - One of the following happens: - - Host sends IN token which causes an endpoint interrupt - -> TxAvail - -> if DMA is currently busy, exit. - -> if queue is non-empty, txstate(). - - - Request is queued by the gadget driver. - -> if queue was previously empty, txstate() - - txstate() - -> start - /\ -> setup DMA - | (data is transferred to the FIFO, then sent out when - | IN token(s) are recd from Host. - | -> DMA interrupt on completion - | calls TxAvail. - | -> stop DMA, ~DmaEenab, - | -> set TxPktRdy for last short pkt or zlp - | -> Complete Request - | -> Continue next request (call txstate) - |___________________________________| - - * Non-Mentor DMA engines can of course work differently, such as by - * upleveling from irq-per-packet to irq-per-buffer. - */ - -#endif - -/* - * An endpoint is transmitting data. This can be called either from - * the IRQ routine or from ep.queue() to kickstart a request on an - * endpoint. - * - * Context: controller locked, IRQs blocked, endpoint selected - */ -static void txstate(struct musb *musb, struct musb_request *req) -{ - u8 epnum = req->epnum; - struct musb_ep *musb_ep; - void __iomem *epio = musb->endpoints[epnum].regs; - struct usb_request *request; - u16 fifo_count = 0, csr; - int use_dma = 0; - - musb_ep = req->ep; - - /* we shouldn't get here while DMA is active ... but we do ... 
*/ - if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { - DBG(4, "dma pending...\n"); - return; - } - - /* read TXCSR before */ - csr = musb_readw(epio, MUSB_TXCSR); - - request = &req->request; - fifo_count = min(max_ep_writesize(musb, musb_ep), - (int)(request->length - request->actual)); - - if (csr & MUSB_TXCSR_TXPKTRDY) { - DBG(5, "%s old packet still ready , txcsr %03x\n", - musb_ep->end_point.name, csr); - return; - } - - if (csr & MUSB_TXCSR_P_SENDSTALL) { - DBG(5, "%s stalling, txcsr %03x\n", - musb_ep->end_point.name, csr); - return; - } - - DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", - epnum, musb_ep->packet_sz, fifo_count, - csr); - -#ifndef CONFIG_MUSB_PIO_ONLY - if (is_dma_capable() && musb_ep->dma) { - struct dma_controller *c = musb->dma_controller; - - use_dma = (request->dma != DMA_ADDR_INVALID); - - /* MUSB_TXCSR_P_ISO is still set correctly */ - -#ifdef CONFIG_USB_INVENTRA_DMA - { - size_t request_size; - - /* setup DMA, then program endpoint CSR */ - request_size = min(request->length, - musb_ep->dma->max_len); - if (request_size <= musb_ep->packet_sz) - musb_ep->dma->desired_mode = 0; - else - musb_ep->dma->desired_mode = 1; - - use_dma = use_dma && c->channel_program( - musb_ep->dma, musb_ep->packet_sz, - musb_ep->dma->desired_mode, - request->dma, request_size); - if (use_dma) { - if (musb_ep->dma->desired_mode == 0) { - /* ASSERT: DMAENAB is clear */ - csr &= ~(MUSB_TXCSR_AUTOSET | - MUSB_TXCSR_DMAMODE); - csr |= (MUSB_TXCSR_DMAENAB | - MUSB_TXCSR_MODE); - /* against programming guide */ - } else - csr |= (MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAENAB - | MUSB_TXCSR_DMAMODE - | MUSB_TXCSR_MODE); - - csr &= ~MUSB_TXCSR_P_UNDERRUN; - musb_writew(epio, MUSB_TXCSR, csr); - } - } - -#elif defined(CONFIG_USB_TI_CPPI_DMA) - /* program endpoint CSR first, then setup DMA */ - csr &= ~(MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAMODE - | MUSB_TXCSR_P_UNDERRUN - | MUSB_TXCSR_TXPKTRDY); - csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; - musb_writew(epio, MUSB_TXCSR, - (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) - | csr); - - /* ensure writebuffer is empty */ - csr = musb_readw(epio, MUSB_TXCSR); - - /* NOTE host side sets DMAENAB later than this; both are - * OK since the transfer dma glue (between CPPI and Mentor - * fifos) just tells CPPI it could start. Data only moves - * to the USB TX fifo when both fifos are ready. - */ - - /* "mode" is irrelevant here; handle terminating ZLPs like - * PIO does, since the hardware RNDIS mode seems unreliable - * except for the last-packet-is-already-short case. - */ - use_dma = use_dma && c->channel_program( - musb_ep->dma, musb_ep->packet_sz, - 0, - request->dma, - request->length); - if (!use_dma) { - c->channel_release(musb_ep->dma); - musb_ep->dma = NULL; - /* ASSERT: DMAENAB clear */ - csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); - /* invariant: prequest->buf is non-null */ - } -#elif defined(CONFIG_USB_TUSB_OMAP_DMA) - use_dma = use_dma && c->channel_program( - musb_ep->dma, musb_ep->packet_sz, - request->zero, - request->dma, - request->length); -#endif - } -#endif - - if (!use_dma) { - musb_write_fifo(musb_ep->hw_ep, fifo_count, - (u8 *) (request->buf + request->actual)); - request->actual += fifo_count; - csr |= MUSB_TXCSR_TXPKTRDY; - csr &= ~MUSB_TXCSR_P_UNDERRUN; - musb_writew(epio, MUSB_TXCSR, csr); - } - - /* host may already have the data when this message shows... */ - DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", - musb_ep->end_point.name, use_dma ? 
"dma" : "pio", - request->actual, request->length, - musb_readw(epio, MUSB_TXCSR), - fifo_count, - musb_readw(epio, MUSB_TXMAXP)); -} - -/* - * FIFO state update (e.g. data ready). - * Called from IRQ, with controller locked. - */ -void musb_g_tx(struct musb *musb, u8 epnum) -{ - u16 csr; - struct usb_request *request; - u8 __iomem *mbase = musb->mregs; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; - void __iomem *epio = musb->endpoints[epnum].regs; - struct dma_channel *dma; - - musb_ep_select(mbase, epnum); - request = next_request(musb_ep); - - csr = musb_readw(epio, MUSB_TXCSR); - DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); - - dma = is_dma_capable() ? musb_ep->dma : NULL; - do { - /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX - * probably rates reporting as a host error - */ - if (csr & MUSB_TXCSR_P_SENTSTALL) { - csr |= MUSB_TXCSR_P_WZC_BITS; - csr &= ~MUSB_TXCSR_P_SENTSTALL; - musb_writew(epio, MUSB_TXCSR, csr); - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - dma->status = MUSB_DMA_STATUS_CORE_ABORT; - musb->dma_controller->channel_abort(dma); - } - - if (request) - musb_g_giveback(musb_ep, request, -EPIPE); - - break; - } - - if (csr & MUSB_TXCSR_P_UNDERRUN) { - /* we NAKed, no big deal ... little reason to care */ - csr |= MUSB_TXCSR_P_WZC_BITS; - csr &= ~(MUSB_TXCSR_P_UNDERRUN - | MUSB_TXCSR_TXPKTRDY); - musb_writew(epio, MUSB_TXCSR, csr); - DBG(20, "underrun on ep%d, req %p\n", epnum, request); - } - - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - /* SHOULD NOT HAPPEN ... has with cppi though, after - * changing SENDSTALL (and other cases); harmless? - */ - DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); - break; - } - - if (request) { - u8 is_dma = 0; - - if (dma && (csr & MUSB_TXCSR_DMAENAB)) { - is_dma = 1; - csr |= MUSB_TXCSR_P_WZC_BITS; - csr &= ~(MUSB_TXCSR_DMAENAB - | MUSB_TXCSR_P_UNDERRUN - | MUSB_TXCSR_TXPKTRDY); - musb_writew(epio, MUSB_TXCSR, csr); - /* ensure writebuffer is empty */ - csr = musb_readw(epio, MUSB_TXCSR); - request->actual += musb_ep->dma->actual_len; - DBG(4, "TXCSR%d %04x, dma off, " - "len %zu, req %p\n", - epnum, csr, - musb_ep->dma->actual_len, - request); - } - - if (is_dma || request->actual == request->length) { - - /* First, maybe a terminating short packet. - * Some DMA engines might handle this by - * themselves. - */ - if ((request->zero - && request->length - && (request->length - % musb_ep->packet_sz) - == 0) -#ifdef CONFIG_USB_INVENTRA_DMA - || (is_dma && - ((!dma->desired_mode) || - (request->actual & - (musb_ep->packet_sz - 1)))) -#endif - ) { - /* on dma completion, fifo may not - * be available yet ... - */ - if (csr & MUSB_TXCSR_TXPKTRDY) - break; - - DBG(4, "sending zero pkt\n"); - musb_writew(epio, MUSB_TXCSR, - MUSB_TXCSR_MODE - | MUSB_TXCSR_TXPKTRDY); - request->zero = 0; - } - - /* ... or if not, then complete it */ - musb_g_giveback(musb_ep, request, 0); - - /* kickstart next transfer if appropriate; - * the packet that just completed might not - * be transmitted for hours or days. - * REVISIT for double buffering... - * FIXME revisit for stalls too... - */ - musb_ep_select(mbase, epnum); - csr = musb_readw(epio, MUSB_TXCSR); - if (csr & MUSB_TXCSR_FIFONOTEMPTY) - break; - request = musb_ep->desc - ? 
next_request(musb_ep) - : NULL; - if (!request) { - DBG(4, "%s idle now\n", - musb_ep->end_point.name); - break; - } - } - - txstate(musb, to_musb_request(request)); - } - - } while (0); -} - -/* ------------------------------------------------------------ */ - -#ifdef CONFIG_USB_INVENTRA_DMA - -/* Peripheral rx (OUT) using Mentor DMA works as follows: - - Only mode 0 is used. - - - Request is queued by the gadget class driver. - -> if queue was previously empty, rxstate() - - - Host sends OUT token which causes an endpoint interrupt - /\ -> RxReady - | -> if request queued, call rxstate - | /\ -> setup DMA - | | -> DMA interrupt on completion - | | -> RxReady - | | -> stop DMA - | | -> ack the read - | | -> if data recd = max expected - | | by the request, or host - | | sent a short packet, - | | complete the request, - | | and start the next one. - | |_____________________________________| - | else just wait for the host - | to send the next OUT token. - |__________________________________________________| - - * Non-Mentor DMA engines can of course work differently. - */ - -#endif - -/* - * Context: controller locked, IRQs blocked, endpoint selected - */ -static void rxstate(struct musb *musb, struct musb_request *req) -{ - u16 csr = 0; - const u8 epnum = req->epnum; - struct usb_request *request = &req->request; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; - void __iomem *epio = musb->endpoints[epnum].regs; - u16 fifo_count = 0; - u16 len = musb_ep->packet_sz; - - csr = musb_readw(epio, MUSB_RXCSR); - - if (is_cppi_enabled() && musb_ep->dma) { - struct dma_controller *c = musb->dma_controller; - struct dma_channel *channel = musb_ep->dma; - - /* NOTE: CPPI won't actually stop advancing the DMA - * queue after short packet transfers, so this is almost - * always going to run as IRQ-per-packet DMA so that - * faults will be handled correctly. - */ - if (c->channel_program(channel, - musb_ep->packet_sz, - !request->short_not_ok, - request->dma + request->actual, - request->length - request->actual)) { - - /* make sure that if an rxpkt arrived after the irq, - * the cppi engine will be ready to take it as soon - * as DMA is enabled - */ - csr &= ~(MUSB_RXCSR_AUTOCLEAR - | MUSB_RXCSR_DMAMODE); - csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; - musb_writew(epio, MUSB_RXCSR, csr); - return; - } - } - - if (csr & MUSB_RXCSR_RXPKTRDY) { - len = musb_readw(epio, MUSB_RXCOUNT); - if (request->actual < request->length) { -#ifdef CONFIG_USB_INVENTRA_DMA - if (is_dma_capable() && musb_ep->dma) { - struct dma_controller *c; - struct dma_channel *channel; - int use_dma = 0; - - c = musb->dma_controller; - channel = musb_ep->dma; - - /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in - * mode 0 only. So we do not get endpoint interrupts due to DMA - * completion. We only get interrupts from DMA controller. - * - * We could operate in DMA mode 1 if we knew the size of the tranfer - * in advance. For mass storage class, request->length = what the host - * sends, so that'd work. But for pretty much everything else, - * request->length is routinely more than what the host sends. For - * most these gadgets, end of is signified either by a short packet, - * or filling the last byte of the buffer. (Sending extra data in - * that last pckate should trigger an overflow fault.) But in mode 1, - * we don't get DMA completion interrrupt for short packets. 
- * - * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), - * to get endpoint interrupt on every DMA req, but that didn't seem - * to work reliably. - * - * REVISIT an updated g_file_storage can set req->short_not_ok, which - * then becomes usable as a runtime "use mode 1" hint... - */ - - csr |= MUSB_RXCSR_DMAENAB; -#ifdef USE_MODE1 - csr |= MUSB_RXCSR_AUTOCLEAR; - /* csr |= MUSB_RXCSR_DMAMODE; */ - - /* this special sequence (enabling and then - * disabling MUSB_RXCSR_DMAMODE) is required - * to get DMAReq to activate - */ - musb_writew(epio, MUSB_RXCSR, - csr | MUSB_RXCSR_DMAMODE); -#endif - musb_writew(epio, MUSB_RXCSR, csr); - - if (request->actual < request->length) { - int transfer_size = 0; -#ifdef USE_MODE1 - transfer_size = min(request->length, - channel->max_len); -#else - transfer_size = len; -#endif - if (transfer_size <= musb_ep->packet_sz) - musb_ep->dma->desired_mode = 0; - else - musb_ep->dma->desired_mode = 1; - - use_dma = c->channel_program( - channel, - musb_ep->packet_sz, - channel->desired_mode, - request->dma - + request->actual, - transfer_size); - } - - if (use_dma) - return; - } -#endif /* Mentor's DMA */ - - fifo_count = request->length - request->actual; - DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", - musb_ep->end_point.name, - len, fifo_count, - musb_ep->packet_sz); - - fifo_count = min(len, fifo_count); - -#ifdef CONFIG_USB_TUSB_OMAP_DMA - if (tusb_dma_omap() && musb_ep->dma) { - struct dma_controller *c = musb->dma_controller; - struct dma_channel *channel = musb_ep->dma; - u32 dma_addr = request->dma + request->actual; - int ret; - - ret = c->channel_program(channel, - musb_ep->packet_sz, - channel->desired_mode, - dma_addr, - fifo_count); - if (ret) - return; - } -#endif - - musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) - (request->buf + request->actual)); - request->actual += fifo_count; - - /* REVISIT if we left anything in the fifo, flush - * it and report -EOVERFLOW - */ - - /* ack the read! */ - csr |= MUSB_RXCSR_P_WZC_BITS; - csr &= ~MUSB_RXCSR_RXPKTRDY; - musb_writew(epio, MUSB_RXCSR, csr); - } - } - - /* reach the end or short packet detected */ - if (request->actual == request->length || len < musb_ep->packet_sz) - musb_g_giveback(musb_ep, request, 0); -} - -/* - * Data ready for a request; called from IRQ - */ -void musb_g_rx(struct musb *musb, u8 epnum) -{ - u16 csr; - struct usb_request *request; - void __iomem *mbase = musb->mregs; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; - void __iomem *epio = musb->endpoints[epnum].regs; - struct dma_channel *dma; - - musb_ep_select(mbase, epnum); - - request = next_request(musb_ep); - - csr = musb_readw(epio, MUSB_RXCSR); - dma = is_dma_capable() ? musb_ep->dma : NULL; - - DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, - csr, dma ? 
" (dma)" : "", request); - - if (csr & MUSB_RXCSR_P_SENTSTALL) { - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - dma->status = MUSB_DMA_STATUS_CORE_ABORT; - (void) musb->dma_controller->channel_abort(dma); - request->actual += musb_ep->dma->actual_len; - } - - csr |= MUSB_RXCSR_P_WZC_BITS; - csr &= ~MUSB_RXCSR_P_SENTSTALL; - musb_writew(epio, MUSB_RXCSR, csr); - - if (request) - musb_g_giveback(musb_ep, request, -EPIPE); - goto done; - } - - if (csr & MUSB_RXCSR_P_OVERRUN) { - /* csr |= MUSB_RXCSR_P_WZC_BITS; */ - csr &= ~MUSB_RXCSR_P_OVERRUN; - musb_writew(epio, MUSB_RXCSR, csr); - - DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); - if (request && request->status == -EINPROGRESS) - request->status = -EOVERFLOW; - } - if (csr & MUSB_RXCSR_INCOMPRX) { - /* REVISIT not necessarily an error */ - DBG(4, "%s, incomprx\n", musb_ep->end_point.name); - } - - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - /* "should not happen"; likely RXPKTRDY pending for DMA */ - DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, - "%s busy, csr %04x\n", - musb_ep->end_point.name, csr); - goto done; - } - - if (dma && (csr & MUSB_RXCSR_DMAENAB)) { - csr &= ~(MUSB_RXCSR_AUTOCLEAR - | MUSB_RXCSR_DMAENAB - | MUSB_RXCSR_DMAMODE); - musb_writew(epio, MUSB_RXCSR, - MUSB_RXCSR_P_WZC_BITS | csr); - - request->actual += musb_ep->dma->actual_len; - - DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", - epnum, csr, - musb_readw(epio, MUSB_RXCSR), - musb_ep->dma->actual_len, request); - -#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) - /* Autoclear doesn't clear RxPktRdy for short packets */ - if ((dma->desired_mode == 0) - || (dma->actual_len - & (musb_ep->packet_sz - 1))) { - /* ack the read! */ - csr &= ~MUSB_RXCSR_RXPKTRDY; - musb_writew(epio, MUSB_RXCSR, csr); - } - - /* incomplete, and not short? wait for next IN packet */ - if ((request->actual < request->length) - && (musb_ep->dma->actual_len - == musb_ep->packet_sz)) - goto done; -#endif - musb_g_giveback(musb_ep, request, 0); - - request = next_request(musb_ep); - if (!request) - goto done; - - /* don't start more i/o till the stall clears */ - musb_ep_select(mbase, epnum); - csr = musb_readw(epio, MUSB_RXCSR); - if (csr & MUSB_RXCSR_P_SENDSTALL) - goto done; - } - - - /* analyze request if the ep is hot */ - if (request) - rxstate(musb, to_musb_request(request)); - else - DBG(3, "packet waiting for %s%s request\n", - musb_ep->desc ? 
"" : "inactive ", - musb_ep->end_point.name); - -done: - return; -} - -/* ------------------------------------------------------------ */ - -static int musb_gadget_enable(struct usb_ep *ep, - const struct usb_endpoint_descriptor *desc) -{ - unsigned long flags; - struct musb_ep *musb_ep; - struct musb_hw_ep *hw_ep; - void __iomem *regs; - struct musb *musb; - void __iomem *mbase; - u8 epnum; - u16 csr; - unsigned tmp; - int status = -EINVAL; - - if (!ep || !desc) - return -EINVAL; - - musb_ep = to_musb_ep(ep); - hw_ep = musb_ep->hw_ep; - regs = hw_ep->regs; - musb = musb_ep->musb; - mbase = musb->mregs; - epnum = musb_ep->current_epnum; - - spin_lock_irqsave(&musb->lock, flags); - - if (musb_ep->desc) { - status = -EBUSY; - goto fail; - } - musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; - - /* check direction and (later) maxpacket size against endpoint */ - if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum) - goto fail; - - /* REVISIT this rules out high bandwidth periodic transfers */ - tmp = le16_to_cpu(desc->wMaxPacketSize); - if (tmp & ~0x07ff) - goto fail; - musb_ep->packet_sz = tmp; - - /* enable the interrupts for the endpoint, set the endpoint - * packet size (or fail), set the mode, clear the fifo - */ - musb_ep_select(mbase, epnum); - if (desc->bEndpointAddress & USB_DIR_IN) { - u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); - - if (hw_ep->is_shared_fifo) - musb_ep->is_in = 1; - if (!musb_ep->is_in) - goto fail; - if (tmp > hw_ep->max_packet_sz_tx) - goto fail; - - int_txe |= (1 << epnum); - musb_writew(mbase, MUSB_INTRTXE, int_txe); - - /* REVISIT if can_bulk_split(), use by updating "tmp"; - * likewise high bandwidth periodic tx - */ - musb_writew(regs, MUSB_TXMAXP, tmp); - - csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; - if (musb_readw(regs, MUSB_TXCSR) - & MUSB_TXCSR_FIFONOTEMPTY) - csr |= MUSB_TXCSR_FLUSHFIFO; - if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) - csr |= MUSB_TXCSR_P_ISO; - - /* set twice in case of double buffering */ - musb_writew(regs, MUSB_TXCSR, csr); - /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ - musb_writew(regs, MUSB_TXCSR, csr); - - } else { - u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); - - if (hw_ep->is_shared_fifo) - musb_ep->is_in = 0; - if (musb_ep->is_in) - goto fail; - if (tmp > hw_ep->max_packet_sz_rx) - goto fail; - - int_rxe |= (1 << epnum); - musb_writew(mbase, MUSB_INTRRXE, int_rxe); - - /* REVISIT if can_bulk_combine() use by updating "tmp" - * likewise high bandwidth periodic rx - */ - musb_writew(regs, MUSB_RXMAXP, tmp); - - /* force shared fifo to OUT-only mode */ - if (hw_ep->is_shared_fifo) { - csr = musb_readw(regs, MUSB_TXCSR); - csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); - musb_writew(regs, MUSB_TXCSR, csr); - } - - csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; - if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) - csr |= MUSB_RXCSR_P_ISO; - else if (musb_ep->type == USB_ENDPOINT_XFER_INT) - csr |= MUSB_RXCSR_DISNYET; - - /* set twice in case of double buffering */ - musb_writew(regs, MUSB_RXCSR, csr); - musb_writew(regs, MUSB_RXCSR, csr); - } - - /* NOTE: all the I/O code _should_ work fine without DMA, in case - * for some reason you run out of channels here. 
- */ - if (is_dma_capable() && musb->dma_controller) { - struct dma_controller *c = musb->dma_controller; - - musb_ep->dma = c->channel_alloc(c, hw_ep, - (desc->bEndpointAddress & USB_DIR_IN)); - } else - musb_ep->dma = NULL; - - musb_ep->desc = desc; - musb_ep->busy = 0; - status = 0; - - pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", - musb_driver_name, musb_ep->end_point.name, - ({ char *s; switch (musb_ep->type) { - case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; - case USB_ENDPOINT_XFER_INT: s = "int"; break; - default: s = "iso"; break; - }; s; }), - musb_ep->is_in ? "IN" : "OUT", - musb_ep->dma ? "dma, " : "", - musb_ep->packet_sz); - - schedule_work(&musb->irq_work); - -fail: - spin_unlock_irqrestore(&musb->lock, flags); - return status; -} - -/* - * Disable an endpoint flushing all requests queued. - */ -static int musb_gadget_disable(struct usb_ep *ep) -{ - unsigned long flags; - struct musb *musb; - u8 epnum; - struct musb_ep *musb_ep; - void __iomem *epio; - int status = 0; - - musb_ep = to_musb_ep(ep); - musb = musb_ep->musb; - epnum = musb_ep->current_epnum; - epio = musb->endpoints[epnum].regs; - - spin_lock_irqsave(&musb->lock, flags); - musb_ep_select(musb->mregs, epnum); - - /* zero the endpoint sizes */ - if (musb_ep->is_in) { - u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); - int_txe &= ~(1 << epnum); - musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); - musb_writew(epio, MUSB_TXMAXP, 0); - } else { - u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); - int_rxe &= ~(1 << epnum); - musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); - musb_writew(epio, MUSB_RXMAXP, 0); - } - - musb_ep->desc = NULL; - - /* abort all pending DMA and requests */ - nuke(musb_ep, -ESHUTDOWN); - - schedule_work(&musb->irq_work); - - spin_unlock_irqrestore(&(musb->lock), flags); - - DBG(2, "%s\n", musb_ep->end_point.name); - - return status; -} - -/* - * Allocate a request for an endpoint. - * Reused by ep0 code. - */ -struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) -{ - struct musb_ep *musb_ep = to_musb_ep(ep); - struct musb_request *request = NULL; - - request = kzalloc(sizeof *request, gfp_flags); - if (request) { - INIT_LIST_HEAD(&request->request.list); - request->request.dma = DMA_ADDR_INVALID; - request->epnum = musb_ep->current_epnum; - request->ep = musb_ep; - } - - return &request->request; -} - -/* - * Free a request - * Reused by ep0 code. - */ -void musb_free_request(struct usb_ep *ep, struct usb_request *req) -{ - kfree(to_musb_request(req)); -} - -static LIST_HEAD(buffers); - -struct free_record { - struct list_head list; - struct device *dev; - unsigned bytes; - dma_addr_t dma; -}; - -/* - * Context: controller locked, IRQs blocked. - */ -static void musb_ep_restart(struct musb *musb, struct musb_request *req) -{ - DBG(3, "<== %s request %p len %u on hw_ep%d\n", - req->tx ? 
"TX/IN" : "RX/OUT", - &req->request, req->request.length, req->epnum); - - musb_ep_select(musb->mregs, req->epnum); - if (req->tx) - txstate(musb, req); - else - rxstate(musb, req); -} - -static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, - gfp_t gfp_flags) -{ - struct musb_ep *musb_ep; - struct musb_request *request; - struct musb *musb; - int status = 0; - unsigned long lockflags; - - if (!ep || !req) - return -EINVAL; - if (!req->buf) - return -ENODATA; - - musb_ep = to_musb_ep(ep); - musb = musb_ep->musb; - - request = to_musb_request(req); - request->musb = musb; - - if (request->ep != musb_ep) - return -EINVAL; - - DBG(4, "<== to %s request=%p\n", ep->name, req); - - /* request is mine now... */ - request->request.actual = 0; - request->request.status = -EINPROGRESS; - request->epnum = musb_ep->current_epnum; - request->tx = musb_ep->is_in; - - if (is_dma_capable() && musb_ep->dma) { - if (request->request.dma == DMA_ADDR_INVALID) { - request->request.dma = dma_map_single( - musb->controller, - request->request.buf, - request->request.length, - request->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - request->mapped = 1; - } else { - dma_sync_single_for_device(musb->controller, - request->request.dma, - request->request.length, - request->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - request->mapped = 0; - } - } else if (!req->buf) { - return -ENODATA; - } else - request->mapped = 0; - - spin_lock_irqsave(&musb->lock, lockflags); - - /* don't queue if the ep is down */ - if (!musb_ep->desc) { - DBG(4, "req %p queued to %s while ep %s\n", - req, ep->name, "disabled"); - status = -ESHUTDOWN; - goto cleanup; - } - - /* add request to the list */ - list_add_tail(&(request->request.list), &(musb_ep->req_list)); - - /* it this is the head of the queue, start i/o ... */ - if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) - musb_ep_restart(musb, request); - -cleanup: - spin_unlock_irqrestore(&musb->lock, lockflags); - return status; -} - -static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) -{ - struct musb_ep *musb_ep = to_musb_ep(ep); - struct usb_request *r; - unsigned long flags; - int status = 0; - struct musb *musb = musb_ep->musb; - - if (!ep || !request || to_musb_request(request)->ep != musb_ep) - return -EINVAL; - - spin_lock_irqsave(&musb->lock, flags); - - list_for_each_entry(r, &musb_ep->req_list, list) { - if (r == request) - break; - } - if (r != request) { - DBG(3, "request %p not queued to %s\n", request, ep->name); - status = -EINVAL; - goto done; - } - - /* if the hardware doesn't have the request, easy ... */ - if (musb_ep->req_list.next != &request->list || musb_ep->busy) - musb_g_giveback(musb_ep, request, -ECONNRESET); - - /* ... else abort the dma transfer ... */ - else if (is_dma_capable() && musb_ep->dma) { - struct dma_controller *c = musb->dma_controller; - - musb_ep_select(musb->mregs, musb_ep->current_epnum); - if (c->channel_abort) - status = c->channel_abort(musb_ep->dma); - else - status = -EBUSY; - if (status == 0) - musb_g_giveback(musb_ep, request, -ECONNRESET); - } else { - /* NOTE: by sticking to easily tested hardware/driver states, - * we leave counting of in-flight packets imprecise. - */ - musb_g_giveback(musb_ep, request, -ECONNRESET); - } - -done: - spin_unlock_irqrestore(&musb->lock, flags); - return status; -} - -/* - * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any - * data but will queue requests. 
- * - * exported to ep0 code - */ -int musb_gadget_set_halt(struct usb_ep *ep, int value) -{ - struct musb_ep *musb_ep = to_musb_ep(ep); - u8 epnum = musb_ep->current_epnum; - struct musb *musb = musb_ep->musb; - void __iomem *epio = musb->endpoints[epnum].regs; - void __iomem *mbase; - unsigned long flags; - u16 csr; - struct musb_request *request = NULL; - int status = 0; - - if (!ep) - return -EINVAL; - mbase = musb->mregs; - - spin_lock_irqsave(&musb->lock, flags); - - if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { - status = -EINVAL; - goto done; - } - - musb_ep_select(mbase, epnum); - - /* cannot portably stall with non-empty FIFO */ - request = to_musb_request(next_request(musb_ep)); - if (value && musb_ep->is_in) { - csr = musb_readw(epio, MUSB_TXCSR); - if (csr & MUSB_TXCSR_FIFONOTEMPTY) { - DBG(3, "%s fifo busy, cannot halt\n", ep->name); - spin_unlock_irqrestore(&musb->lock, flags); - return -EAGAIN; - } - - } - - /* set/clear the stall and toggle bits */ - DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); - if (musb_ep->is_in) { - csr = musb_readw(epio, MUSB_TXCSR); - if (csr & MUSB_TXCSR_FIFONOTEMPTY) - csr |= MUSB_TXCSR_FLUSHFIFO; - csr |= MUSB_TXCSR_P_WZC_BITS - | MUSB_TXCSR_CLRDATATOG; - if (value) - csr |= MUSB_TXCSR_P_SENDSTALL; - else - csr &= ~(MUSB_TXCSR_P_SENDSTALL - | MUSB_TXCSR_P_SENTSTALL); - csr &= ~MUSB_TXCSR_TXPKTRDY; - musb_writew(epio, MUSB_TXCSR, csr); - } else { - csr = musb_readw(epio, MUSB_RXCSR); - csr |= MUSB_RXCSR_P_WZC_BITS - | MUSB_RXCSR_FLUSHFIFO - | MUSB_RXCSR_CLRDATATOG; - if (value) - csr |= MUSB_RXCSR_P_SENDSTALL; - else - csr &= ~(MUSB_RXCSR_P_SENDSTALL - | MUSB_RXCSR_P_SENTSTALL); - musb_writew(epio, MUSB_RXCSR, csr); - } - -done: - - /* maybe start the first request in the queue */ - if (!musb_ep->busy && !value && request) { - DBG(3, "restarting the request\n"); - musb_ep_restart(musb, request); - } - - spin_unlock_irqrestore(&musb->lock, flags); - return status; -} - -static int musb_gadget_fifo_status(struct usb_ep *ep) -{ - struct musb_ep *musb_ep = to_musb_ep(ep); - void __iomem *epio = musb_ep->hw_ep->regs; - int retval = -EINVAL; - - if (musb_ep->desc && !musb_ep->is_in) { - struct musb *musb = musb_ep->musb; - int epnum = musb_ep->current_epnum; - void __iomem *mbase = musb->mregs; - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); - - musb_ep_select(mbase, epnum); - /* FIXME return zero unless RXPKTRDY is set */ - retval = musb_readw(epio, MUSB_RXCOUNT); - - spin_unlock_irqrestore(&musb->lock, flags); - } - return retval; -} - -static void musb_gadget_fifo_flush(struct usb_ep *ep) -{ - struct musb_ep *musb_ep = to_musb_ep(ep); - struct musb *musb = musb_ep->musb; - u8 epnum = musb_ep->current_epnum; - void __iomem *epio = musb->endpoints[epnum].regs; - void __iomem *mbase; - unsigned long flags; - u16 csr, int_txe; - - mbase = musb->mregs; - - spin_lock_irqsave(&musb->lock, flags); - musb_ep_select(mbase, (u8) epnum); - - /* disable interrupts */ - int_txe = musb_readw(mbase, MUSB_INTRTXE); - musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); - - if (musb_ep->is_in) { - csr = musb_readw(epio, MUSB_TXCSR); - if (csr & MUSB_TXCSR_FIFONOTEMPTY) { - csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; - musb_writew(epio, MUSB_TXCSR, csr); - /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... 
*/ - musb_writew(epio, MUSB_TXCSR, csr); - } - } else { - csr = musb_readw(epio, MUSB_RXCSR); - csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; - musb_writew(epio, MUSB_RXCSR, csr); - musb_writew(epio, MUSB_RXCSR, csr); - } - - /* re-enable interrupt */ - musb_writew(mbase, MUSB_INTRTXE, int_txe); - spin_unlock_irqrestore(&musb->lock, flags); -} - -static const struct usb_ep_ops musb_ep_ops = { - .enable = musb_gadget_enable, - .disable = musb_gadget_disable, - .alloc_request = musb_alloc_request, - .free_request = musb_free_request, - .queue = musb_gadget_queue, - .dequeue = musb_gadget_dequeue, - .set_halt = musb_gadget_set_halt, - .fifo_status = musb_gadget_fifo_status, - .fifo_flush = musb_gadget_fifo_flush -}; - -/* ----------------------------------------------------------------------- */ - -static int musb_gadget_get_frame(struct usb_gadget *gadget) -{ - struct musb *musb = gadget_to_musb(gadget); - - return (int)musb_readw(musb->mregs, MUSB_FRAME); -} - -static int musb_gadget_wakeup(struct usb_gadget *gadget) -{ - struct musb *musb = gadget_to_musb(gadget); - void __iomem *mregs = musb->mregs; - unsigned long flags; - int status = -EINVAL; - u8 power, devctl; - int retries; - - spin_lock_irqsave(&musb->lock, flags); - - switch (musb->xceiv.state) { - case OTG_STATE_B_PERIPHERAL: - /* NOTE: OTG state machine doesn't include B_SUSPENDED; - * that's part of the standard usb 1.1 state machine, and - * doesn't affect OTG transitions. - */ - if (musb->may_wakeup && musb->is_suspended) - break; - goto done; - case OTG_STATE_B_IDLE: - /* Start SRP ... OTG not required. */ - devctl = musb_readb(mregs, MUSB_DEVCTL); - DBG(2, "Sending SRP: devctl: %02x\n", devctl); - devctl |= MUSB_DEVCTL_SESSION; - musb_writeb(mregs, MUSB_DEVCTL, devctl); - devctl = musb_readb(mregs, MUSB_DEVCTL); - retries = 100; - while (!(devctl & MUSB_DEVCTL_SESSION)) { - devctl = musb_readb(mregs, MUSB_DEVCTL); - if (retries-- < 1) - break; - } - retries = 10000; - while (devctl & MUSB_DEVCTL_SESSION) { - devctl = musb_readb(mregs, MUSB_DEVCTL); - if (retries-- < 1) - break; - } - - /* Block idling for at least 1s */ - musb_platform_try_idle(musb, - jiffies + msecs_to_jiffies(1 * HZ)); - - status = 0; - goto done; - default: - DBG(2, "Unhandled wake: %s\n", otg_state_string(musb)); - goto done; - } - - status = 0; - - power = musb_readb(mregs, MUSB_POWER); - power |= MUSB_POWER_RESUME; - musb_writeb(mregs, MUSB_POWER, power); - DBG(2, "issue wakeup\n"); - - /* FIXME do this next chunk in a timer callback, no udelay */ - mdelay(2); - - power = musb_readb(mregs, MUSB_POWER); - power &= ~MUSB_POWER_RESUME; - musb_writeb(mregs, MUSB_POWER, power); -done: - spin_unlock_irqrestore(&musb->lock, flags); - return status; -} - -static int -musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) -{ - struct musb *musb = gadget_to_musb(gadget); - - musb->is_self_powered = !!is_selfpowered; - return 0; -} - -static void musb_pullup(struct musb *musb, int is_on) -{ - u8 power; - - power = musb_readb(musb->mregs, MUSB_POWER); - if (is_on) - power |= MUSB_POWER_SOFTCONN; - else - power &= ~MUSB_POWER_SOFTCONN; - - /* FIXME if on, HdrcStart; if off, HdrcStop */ - - DBG(3, "gadget %s D+ pullup %s\n", - musb->gadget_driver->function, is_on ? 
"on" : "off"); - musb_writeb(musb->mregs, MUSB_POWER, power); -} - -#if 0 -static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) -{ - DBG(2, "<= %s =>\n", __func__); - - /* - * FIXME iff driver's softconnect flag is set (as it is during probe, - * though that can clear it), just musb_pullup(). - */ - - return -EINVAL; -} -#endif - -static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) -{ - struct musb *musb = gadget_to_musb(gadget); - - if (!musb->xceiv.set_power) - return -EOPNOTSUPP; - return otg_set_power(&musb->xceiv, mA); -} - -static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) -{ - struct musb *musb = gadget_to_musb(gadget); - unsigned long flags; - - is_on = !!is_on; - - /* NOTE: this assumes we are sensing vbus; we'd rather - * not pullup unless the B-session is active. - */ - spin_lock_irqsave(&musb->lock, flags); - if (is_on != musb->softconnect) { - musb->softconnect = is_on; - musb_pullup(musb, is_on); - } - spin_unlock_irqrestore(&musb->lock, flags); - return 0; -} - -static const struct usb_gadget_ops musb_gadget_operations = { - .get_frame = musb_gadget_get_frame, - .wakeup = musb_gadget_wakeup, - .set_selfpowered = musb_gadget_set_self_powered, - /* .vbus_session = musb_gadget_vbus_session, */ - .vbus_draw = musb_gadget_vbus_draw, - .pullup = musb_gadget_pullup, -}; - -/* ----------------------------------------------------------------------- */ - -/* Registration */ - -/* Only this registration code "knows" the rule (from USB standards) - * about there being only one external upstream port. It assumes - * all peripheral ports are external... - */ -static struct musb *the_gadget; - -static void musb_gadget_release(struct device *dev) -{ - /* kref_put(WHAT) */ - dev_dbg(dev, "%s\n", __func__); -} - - -static void __init -init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) -{ - struct musb_hw_ep *hw_ep = musb->endpoints + epnum; - - memset(ep, 0, sizeof *ep); - - ep->current_epnum = epnum; - ep->musb = musb; - ep->hw_ep = hw_ep; - ep->is_in = is_in; - - INIT_LIST_HEAD(&ep->req_list); - - sprintf(ep->name, "ep%d%s", epnum, - (!epnum || hw_ep->is_shared_fifo) ? "" : ( - is_in ? "in" : "out")); - ep->end_point.name = ep->name; - INIT_LIST_HEAD(&ep->end_point.ep_list); - if (!epnum) { - ep->end_point.maxpacket = 64; - ep->end_point.ops = &musb_g_ep0_ops; - musb->g.ep0 = &ep->end_point; - } else { - if (is_in) - ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; - else - ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; - ep->end_point.ops = &musb_ep_ops; - list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); - } -} - -/* - * Initialize the endpoints exposed to peripheral drivers, with backlinks - * to the rest of the driver state. - */ -static inline void __init musb_g_init_endpoints(struct musb *musb) -{ - u8 epnum; - struct musb_hw_ep *hw_ep; - unsigned count = 0; - - /* intialize endpoint list just once */ - INIT_LIST_HEAD(&(musb->g.ep_list)); - - for (epnum = 0, hw_ep = musb->endpoints; - epnum < musb->nr_endpoints; - epnum++, hw_ep++) { - if (hw_ep->is_shared_fifo /* || !epnum */) { - init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); - count++; - } else { - if (hw_ep->max_packet_sz_tx) { - init_peripheral_ep(musb, &hw_ep->ep_in, - epnum, 1); - count++; - } - if (hw_ep->max_packet_sz_rx) { - init_peripheral_ep(musb, &hw_ep->ep_out, - epnum, 0); - count++; - } - } - } -} - -/* called once during driver setup to initialize and link into - * the driver model; memory is zeroed. 
- */ -int __init musb_gadget_setup(struct musb *musb) -{ - int status; - - /* REVISIT minor race: if (erroneously) setting up two - * musb peripherals at the same time, only the bus lock - * is probably held. - */ - if (the_gadget) - return -EBUSY; - the_gadget = musb; - - musb->g.ops = &musb_gadget_operations; - musb->g.is_dualspeed = 1; - musb->g.speed = USB_SPEED_UNKNOWN; - - /* this "gadget" abstracts/virtualizes the controller */ - strcpy(musb->g.dev.bus_id, "gadget"); - musb->g.dev.parent = musb->controller; - musb->g.dev.dma_mask = musb->controller->dma_mask; - musb->g.dev.release = musb_gadget_release; - musb->g.name = musb_driver_name; - - if (is_otg_enabled(musb)) - musb->g.is_otg = 1; - - musb_g_init_endpoints(musb); - - musb->is_active = 0; - musb_platform_try_idle(musb, 0); - - status = device_register(&musb->g.dev); - if (status != 0) - the_gadget = NULL; - return status; -} - -void musb_gadget_cleanup(struct musb *musb) -{ - if (musb != the_gadget) - return; - - device_unregister(&musb->g.dev); - the_gadget = NULL; -} - -/* - * Register the gadget driver. Used by gadget drivers when - * registering themselves with the controller. - * - * -EINVAL something went wrong (not driver) - * -EBUSY another gadget is already using the controller - * -ENOMEM no memeory to perform the operation - * - * @param driver the gadget driver - * @return <0 if error, 0 if everything is fine - */ -int usb_gadget_register_driver(struct usb_gadget_driver *driver) -{ - int retval; - unsigned long flags; - struct musb *musb = the_gadget; - - if (!driver - || driver->speed != USB_SPEED_HIGH - || !driver->bind - || !driver->setup) - return -EINVAL; - - /* driver must be initialized to support peripheral mode */ - if (!musb || !(musb->board_mode == MUSB_OTG - || musb->board_mode != MUSB_OTG)) { - DBG(1, "%s, no dev??\n", __func__); - return -ENODEV; - } - - DBG(3, "registering driver %s\n", driver->function); - spin_lock_irqsave(&musb->lock, flags); - - if (musb->gadget_driver) { - DBG(1, "%s is already bound to %s\n", - musb_driver_name, - musb->gadget_driver->driver.name); - retval = -EBUSY; - } else { - musb->gadget_driver = driver; - musb->g.dev.driver = &driver->driver; - driver->driver.bus = NULL; - musb->softconnect = 1; - retval = 0; - } - - spin_unlock_irqrestore(&musb->lock, flags); - - if (retval == 0) { - retval = driver->bind(&musb->g); - if (retval != 0) { - DBG(3, "bind to driver %s failed --> %d\n", - driver->driver.name, retval); - musb->gadget_driver = NULL; - musb->g.dev.driver = NULL; - } - - spin_lock_irqsave(&musb->lock, flags); - - /* REVISIT always use otg_set_peripheral(), handling - * issues including the root hub one below ... - */ - musb->xceiv.gadget = &musb->g; - musb->xceiv.state = OTG_STATE_B_IDLE; - musb->is_active = 1; - - /* FIXME this ignores the softconnect flag. Drivers are - * allowed hold the peripheral inactive until for example - * userspace hooks up printer hardware or DSP codecs, so - * hosts only see fully functional devices. - */ - - if (!is_otg_enabled(musb)) - musb_start(musb); - - spin_unlock_irqrestore(&musb->lock, flags); - - if (is_otg_enabled(musb)) { - DBG(3, "OTG startup...\n"); - - /* REVISIT: funcall to other code, which also - * handles power budgeting ... this way also - * ensures HdrcStart is indirectly called. 
- */ - retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); - if (retval < 0) { - DBG(1, "add_hcd failed, %d\n", retval); - spin_lock_irqsave(&musb->lock, flags); - musb->xceiv.gadget = NULL; - musb->xceiv.state = OTG_STATE_UNDEFINED; - musb->gadget_driver = NULL; - musb->g.dev.driver = NULL; - spin_unlock_irqrestore(&musb->lock, flags); - } - } - } - - return retval; -} -EXPORT_SYMBOL(usb_gadget_register_driver); - -static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) -{ - int i; - struct musb_hw_ep *hw_ep; - - /* don't disconnect if it's not connected */ - if (musb->g.speed == USB_SPEED_UNKNOWN) - driver = NULL; - else - musb->g.speed = USB_SPEED_UNKNOWN; - - /* deactivate the hardware */ - if (musb->softconnect) { - musb->softconnect = 0; - musb_pullup(musb, 0); - } - musb_stop(musb); - - /* killing any outstanding requests will quiesce the driver; - * then report disconnect - */ - if (driver) { - for (i = 0, hw_ep = musb->endpoints; - i < musb->nr_endpoints; - i++, hw_ep++) { - musb_ep_select(musb->mregs, i); - if (hw_ep->is_shared_fifo /* || !epnum */) { - nuke(&hw_ep->ep_in, -ESHUTDOWN); - } else { - if (hw_ep->max_packet_sz_tx) - nuke(&hw_ep->ep_in, -ESHUTDOWN); - if (hw_ep->max_packet_sz_rx) - nuke(&hw_ep->ep_out, -ESHUTDOWN); - } - } - - spin_unlock(&musb->lock); - driver->disconnect(&musb->g); - spin_lock(&musb->lock); - } -} - -/* - * Unregister the gadget driver. Used by gadget drivers when - * unregistering themselves from the controller. - * - * @param driver the gadget driver to unregister - */ -int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) -{ - unsigned long flags; - int retval = 0; - struct musb *musb = the_gadget; - - if (!driver || !driver->unbind || !musb) - return -EINVAL; - - /* REVISIT always use otg_set_peripheral() here too; - * this needs to shut down the OTG engine. - */ - - spin_lock_irqsave(&musb->lock, flags); - -#ifdef CONFIG_USB_MUSB_OTG - musb_hnp_stop(musb); -#endif - - if (musb->gadget_driver == driver) { - - (void) musb_gadget_vbus_draw(&musb->g, 0); - - musb->xceiv.state = OTG_STATE_UNDEFINED; - stop_activity(musb, driver); - - DBG(3, "unregistering driver %s\n", driver->function); - spin_unlock_irqrestore(&musb->lock, flags); - driver->unbind(&musb->g); - spin_lock_irqsave(&musb->lock, flags); - - musb->gadget_driver = NULL; - musb->g.dev.driver = NULL; - - musb->is_active = 0; - musb_platform_try_idle(musb, 0); - } else - retval = -EINVAL; - spin_unlock_irqrestore(&musb->lock, flags); - - if (is_otg_enabled(musb) && retval == 0) { - usb_remove_hcd(musb_to_hcd(musb)); - /* FIXME we need to be able to register another - * gadget driver here and have everything work; - * that currently misbehaves. 
- */ - } - - return retval; -} -EXPORT_SYMBOL(usb_gadget_unregister_driver); - - -/* ----------------------------------------------------------------------- */ - -/* lifecycle operations called through plat_uds.c */ - -void musb_g_resume(struct musb *musb) -{ - musb->is_suspended = 0; - switch (musb->xceiv.state) { - case OTG_STATE_B_IDLE: - break; - case OTG_STATE_B_WAIT_ACON: - case OTG_STATE_B_PERIPHERAL: - musb->is_active = 1; - if (musb->gadget_driver && musb->gadget_driver->resume) { - spin_unlock(&musb->lock); - musb->gadget_driver->resume(&musb->g); - spin_lock(&musb->lock); - } - break; - default: - WARNING("unhandled RESUME transition (%s)\n", - otg_state_string(musb)); - } -} - -/* called when SOF packets stop for 3+ msec */ -void musb_g_suspend(struct musb *musb) -{ - u8 devctl; - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - DBG(3, "devctl %02x\n", devctl); - - switch (musb->xceiv.state) { - case OTG_STATE_B_IDLE: - if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) - musb->xceiv.state = OTG_STATE_B_PERIPHERAL; - break; - case OTG_STATE_B_PERIPHERAL: - musb->is_suspended = 1; - if (musb->gadget_driver && musb->gadget_driver->suspend) { - spin_unlock(&musb->lock); - musb->gadget_driver->suspend(&musb->g); - spin_lock(&musb->lock); - } - break; - default: - /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; - * A_PERIPHERAL may need care too - */ - WARNING("unhandled SUSPEND transition (%s)\n", - otg_state_string(musb)); - } -} - -/* Called during SRP */ -void musb_g_wakeup(struct musb *musb) -{ - musb_gadget_wakeup(&musb->g); -} - -/* called when VBUS drops below session threshold, and in other cases */ -void musb_g_disconnect(struct musb *musb) -{ - void __iomem *mregs = musb->mregs; - u8 devctl = musb_readb(mregs, MUSB_DEVCTL); - - DBG(3, "devctl %02x\n", devctl); - - /* clear HR */ - musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); - - /* don't draw vbus until new b-default session */ - (void) musb_gadget_vbus_draw(&musb->g, 0); - - musb->g.speed = USB_SPEED_UNKNOWN; - if (musb->gadget_driver && musb->gadget_driver->disconnect) { - spin_unlock(&musb->lock); - musb->gadget_driver->disconnect(&musb->g); - spin_lock(&musb->lock); - } - - switch (musb->xceiv.state) { - default: -#ifdef CONFIG_USB_MUSB_OTG - DBG(2, "Unhandled disconnect %s, setting a_idle\n", - otg_state_string(musb)); - musb->xceiv.state = OTG_STATE_A_IDLE; - break; - case OTG_STATE_A_PERIPHERAL: - musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; - break; - case OTG_STATE_B_WAIT_ACON: - case OTG_STATE_B_HOST: -#endif - case OTG_STATE_B_PERIPHERAL: - case OTG_STATE_B_IDLE: - musb->xceiv.state = OTG_STATE_B_IDLE; - break; - case OTG_STATE_B_SRP_INIT: - break; - } - - musb->is_active = 0; -} - -void musb_g_reset(struct musb *musb) -__releases(musb->lock) -__acquires(musb->lock) -{ - void __iomem *mbase = musb->mregs; - u8 devctl = musb_readb(mbase, MUSB_DEVCTL); - u8 power; - - DBG(3, "<== %s addr=%x driver '%s'\n", - (devctl & MUSB_DEVCTL_BDEVICE) - ? "B-Device" : "A-Device", - musb_readb(mbase, MUSB_FADDR), - musb->gadget_driver - ? musb->gadget_driver->driver.name - : NULL - ); - - /* report disconnect, if we didn't already (flushing EP state) */ - if (musb->g.speed != USB_SPEED_UNKNOWN) - musb_g_disconnect(musb); - - /* clear HR */ - else if (devctl & MUSB_DEVCTL_HR) - musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); - - - /* what speed did we negotiate? */ - power = musb_readb(mbase, MUSB_POWER); - musb->g.speed = (power & MUSB_POWER_HSMODE) - ? 
USB_SPEED_HIGH : USB_SPEED_FULL; - - /* start in USB_STATE_DEFAULT */ - musb->is_active = 1; - musb->is_suspended = 0; - MUSB_DEV_MODE(musb); - musb->address = 0; - musb->ep0_state = MUSB_EP0_STAGE_SETUP; - - musb->may_wakeup = 0; - musb->g.b_hnp_enable = 0; - musb->g.a_alt_hnp_support = 0; - musb->g.a_hnp_support = 0; - - /* Normal reset, as B-Device; - * or else after HNP, as A-Device - */ - if (devctl & MUSB_DEVCTL_BDEVICE) { - musb->xceiv.state = OTG_STATE_B_PERIPHERAL; - musb->g.is_a_peripheral = 0; - } else if (is_otg_enabled(musb)) { - musb->xceiv.state = OTG_STATE_A_PERIPHERAL; - musb->g.is_a_peripheral = 1; - } else - WARN_ON(1); - - /* start with default limits on VBUS power draw */ - (void) musb_gadget_vbus_draw(&musb->g, - is_otg_enabled(musb) ? 8 : 100); -} diff --git a/trunk/drivers/usb/musb/musb_gadget.h b/trunk/drivers/usb/musb/musb_gadget.h deleted file mode 100644 index 59502da9f739..000000000000 --- a/trunk/drivers/usb/musb/musb_gadget.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * MUSB OTG driver peripheral defines - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __MUSB_GADGET_H -#define __MUSB_GADGET_H - -struct musb_request { - struct usb_request request; - struct musb_ep *ep; - struct musb *musb; - u8 tx; /* endpoint direction */ - u8 epnum; - u8 mapped; -}; - -static inline struct musb_request *to_musb_request(struct usb_request *req) -{ - return req ? container_of(req, struct musb_request, request) : NULL; -} - -extern struct usb_request * -musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); -extern void musb_free_request(struct usb_ep *ep, struct usb_request *req); - - -/* - * struct musb_ep - peripheral side view of endpoint rx or tx side - */ -struct musb_ep { - /* stuff towards the head is basically write-once. */ - struct usb_ep end_point; - char name[12]; - struct musb_hw_ep *hw_ep; - struct musb *musb; - u8 current_epnum; - - /* ... when enabled/disabled ... 
*/ - u8 type; - u8 is_in; - u16 packet_sz; - const struct usb_endpoint_descriptor *desc; - struct dma_channel *dma; - - /* later things are modified based on usage */ - struct list_head req_list; - - /* true if lock must be dropped but req_list may not be advanced */ - u8 busy; -}; - -static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) -{ - return ep ? container_of(ep, struct musb_ep, end_point) : NULL; -} - -static inline struct usb_request *next_request(struct musb_ep *ep) -{ - struct list_head *queue = &ep->req_list; - - if (list_empty(queue)) - return NULL; - return container_of(queue->next, struct usb_request, list); -} - -extern void musb_g_tx(struct musb *musb, u8 epnum); -extern void musb_g_rx(struct musb *musb, u8 epnum); - -extern const struct usb_ep_ops musb_g_ep0_ops; - -extern int musb_gadget_setup(struct musb *); -extern void musb_gadget_cleanup(struct musb *); - -extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); - -extern int musb_gadget_set_halt(struct usb_ep *ep, int value); - -#endif /* __MUSB_GADGET_H */ diff --git a/trunk/drivers/usb/musb/musb_gadget_ep0.c b/trunk/drivers/usb/musb/musb_gadget_ep0.c deleted file mode 100644 index 48d7d3ccb243..000000000000 --- a/trunk/drivers/usb/musb/musb_gadget_ep0.c +++ /dev/null @@ -1,981 +0,0 @@ -/* - * MUSB OTG peripheral driver ep0 handling - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "musb_core.h" - -/* ep0 is always musb->endpoints[0].ep_in */ -#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) - -/* - * locking note: we use only the controller lock, for simpler correctness. - * It's always held with IRQs blocked. - * - * It protects the ep0 request queue as well as ep0_state, not just the - * controller and indexed registers. And that lock stays held unless it - * needs to be dropped to allow reentering this driver ... like upcalls to - * the gadget driver, or adjusting endpoint halt status. 
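The to_musb_request()/to_musb_ep() helpers above use the usual container_of() wrapping idiom: the controller driver embeds the generic object inside its own state, hands the embedded member to the gadget core, and later recovers the wrapper from a pointer to that member. A stand-alone sketch of the same pattern (hypothetical struct and helper names, not code from this patch):

#include <linux/kernel.h>		/* container_of() */
#include <linux/usb/gadget.h>		/* struct usb_request */

struct example_wrapper {
	struct usb_request request;	/* what the gadget core is handed */
	int private_state;		/* driver-private bookkeeping */
};

static struct example_wrapper *to_example_wrapper(struct usb_request *req)
{
	/* same idiom as to_musb_request()/to_musb_ep() above */
	return container_of(req, struct example_wrapper, request);
}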
- */ - -static char *decode_ep0stage(u8 stage) -{ - switch (stage) { - case MUSB_EP0_STAGE_SETUP: return "idle"; - case MUSB_EP0_STAGE_TX: return "in"; - case MUSB_EP0_STAGE_RX: return "out"; - case MUSB_EP0_STAGE_ACKWAIT: return "wait"; - case MUSB_EP0_STAGE_STATUSIN: return "in/status"; - case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; - default: return "?"; - } -} - -/* handle a standard GET_STATUS request - * Context: caller holds controller lock - */ -static int service_tx_status_request( - struct musb *musb, - const struct usb_ctrlrequest *ctrlrequest) -{ - void __iomem *mbase = musb->mregs; - int handled = 1; - u8 result[2], epnum = 0; - const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; - - result[1] = 0; - - switch (recip) { - case USB_RECIP_DEVICE: - result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; - result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; -#ifdef CONFIG_USB_MUSB_OTG - if (musb->g.is_otg) { - result[0] |= musb->g.b_hnp_enable - << USB_DEVICE_B_HNP_ENABLE; - result[0] |= musb->g.a_alt_hnp_support - << USB_DEVICE_A_ALT_HNP_SUPPORT; - result[0] |= musb->g.a_hnp_support - << USB_DEVICE_A_HNP_SUPPORT; - } -#endif - break; - - case USB_RECIP_INTERFACE: - result[0] = 0; - break; - - case USB_RECIP_ENDPOINT: { - int is_in; - struct musb_ep *ep; - u16 tmp; - void __iomem *regs; - - epnum = (u8) ctrlrequest->wIndex; - if (!epnum) { - result[0] = 0; - break; - } - - is_in = epnum & USB_DIR_IN; - if (is_in) { - epnum &= 0x0f; - ep = &musb->endpoints[epnum].ep_in; - } else { - ep = &musb->endpoints[epnum].ep_out; - } - regs = musb->endpoints[epnum].regs; - - if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { - handled = -EINVAL; - break; - } - - musb_ep_select(mbase, epnum); - if (is_in) - tmp = musb_readw(regs, MUSB_TXCSR) - & MUSB_TXCSR_P_SENDSTALL; - else - tmp = musb_readw(regs, MUSB_RXCSR) - & MUSB_RXCSR_P_SENDSTALL; - musb_ep_select(mbase, 0); - - result[0] = tmp ? 1 : 0; - } break; - - default: - /* class, vendor, etc ... delegate */ - handled = 0; - break; - } - - /* fill up the fifo; caller updates csr0 */ - if (handled > 0) { - u16 len = le16_to_cpu(ctrlrequest->wLength); - - if (len > 2) - len = 2; - musb_write_fifo(&musb->endpoints[0], len, result); - } - - return handled; -} - -/* - * handle a control-IN request, the end0 buffer contains the current request - * that is supposed to be a standard control request. Assumes the fifo to - * be at least 2 bytes long. 
- * - * @return 0 if the request was NOT HANDLED, - * < 0 when error - * > 0 when the request is processed - * - * Context: caller holds controller lock - */ -static int -service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) -{ - int handled = 0; /* not handled */ - - if ((ctrlrequest->bRequestType & USB_TYPE_MASK) - == USB_TYPE_STANDARD) { - switch (ctrlrequest->bRequest) { - case USB_REQ_GET_STATUS: - handled = service_tx_status_request(musb, - ctrlrequest); - break; - - /* case USB_REQ_SYNC_FRAME: */ - - default: - break; - } - } - return handled; -} - -/* - * Context: caller holds controller lock - */ -static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) -{ - musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); - musb->ep0_state = MUSB_EP0_STAGE_SETUP; -} - -/* - * Tries to start B-device HNP negotiation if enabled via sysfs - */ -static inline void musb_try_b_hnp_enable(struct musb *musb) -{ - void __iomem *mbase = musb->mregs; - u8 devctl; - - DBG(1, "HNP: Setting HR\n"); - devctl = musb_readb(mbase, MUSB_DEVCTL); - musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); -} - -/* - * Handle all control requests with no DATA stage, including standard - * requests such as: - * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized - * always delegated to the gadget driver - * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE - * always handled here, except for class/vendor/... features - * - * Context: caller holds controller lock - */ -static int -service_zero_data_request(struct musb *musb, - struct usb_ctrlrequest *ctrlrequest) -__releases(musb->lock) -__acquires(musb->lock) -{ - int handled = -EINVAL; - void __iomem *mbase = musb->mregs; - const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; - - /* the gadget driver handles everything except what we MUST handle */ - if ((ctrlrequest->bRequestType & USB_TYPE_MASK) - == USB_TYPE_STANDARD) { - switch (ctrlrequest->bRequest) { - case USB_REQ_SET_ADDRESS: - /* change it after the status stage */ - musb->set_address = true; - musb->address = (u8) (ctrlrequest->wValue & 0x7f); - handled = 1; - break; - - case USB_REQ_CLEAR_FEATURE: - switch (recip) { - case USB_RECIP_DEVICE: - if (ctrlrequest->wValue - != USB_DEVICE_REMOTE_WAKEUP) - break; - musb->may_wakeup = 0; - handled = 1; - break; - case USB_RECIP_INTERFACE: - break; - case USB_RECIP_ENDPOINT:{ - const u8 num = ctrlrequest->wIndex & 0x0f; - struct musb_ep *musb_ep; - - if (num == 0 - || num >= MUSB_C_NUM_EPS - || ctrlrequest->wValue - != USB_ENDPOINT_HALT) - break; - - if (ctrlrequest->wIndex & USB_DIR_IN) - musb_ep = &musb->endpoints[num].ep_in; - else - musb_ep = &musb->endpoints[num].ep_out; - if (!musb_ep->desc) - break; - - /* REVISIT do it directly, no locking games */ - spin_unlock(&musb->lock); - musb_gadget_set_halt(&musb_ep->end_point, 0); - spin_lock(&musb->lock); - - /* select ep0 again */ - musb_ep_select(mbase, 0); - handled = 1; - } break; - default: - /* class, vendor, etc ... 
delegate */ - handled = 0; - break; - } - break; - - case USB_REQ_SET_FEATURE: - switch (recip) { - case USB_RECIP_DEVICE: - handled = 1; - switch (ctrlrequest->wValue) { - case USB_DEVICE_REMOTE_WAKEUP: - musb->may_wakeup = 1; - break; - case USB_DEVICE_TEST_MODE: - if (musb->g.speed != USB_SPEED_HIGH) - goto stall; - if (ctrlrequest->wIndex & 0xff) - goto stall; - - switch (ctrlrequest->wIndex >> 8) { - case 1: - pr_debug("TEST_J\n"); - /* TEST_J */ - musb->test_mode_nr = - MUSB_TEST_J; - break; - case 2: - /* TEST_K */ - pr_debug("TEST_K\n"); - musb->test_mode_nr = - MUSB_TEST_K; - break; - case 3: - /* TEST_SE0_NAK */ - pr_debug("TEST_SE0_NAK\n"); - musb->test_mode_nr = - MUSB_TEST_SE0_NAK; - break; - case 4: - /* TEST_PACKET */ - pr_debug("TEST_PACKET\n"); - musb->test_mode_nr = - MUSB_TEST_PACKET; - break; - default: - goto stall; - } - - /* enter test mode after irq */ - if (handled > 0) - musb->test_mode = true; - break; -#ifdef CONFIG_USB_MUSB_OTG - case USB_DEVICE_B_HNP_ENABLE: - if (!musb->g.is_otg) - goto stall; - musb->g.b_hnp_enable = 1; - musb_try_b_hnp_enable(musb); - break; - case USB_DEVICE_A_HNP_SUPPORT: - if (!musb->g.is_otg) - goto stall; - musb->g.a_hnp_support = 1; - break; - case USB_DEVICE_A_ALT_HNP_SUPPORT: - if (!musb->g.is_otg) - goto stall; - musb->g.a_alt_hnp_support = 1; - break; -#endif -stall: - default: - handled = -EINVAL; - break; - } - break; - - case USB_RECIP_INTERFACE: - break; - - case USB_RECIP_ENDPOINT:{ - const u8 epnum = - ctrlrequest->wIndex & 0x0f; - struct musb_ep *musb_ep; - struct musb_hw_ep *ep; - void __iomem *regs; - int is_in; - u16 csr; - - if (epnum == 0 - || epnum >= MUSB_C_NUM_EPS - || ctrlrequest->wValue - != USB_ENDPOINT_HALT) - break; - - ep = musb->endpoints + epnum; - regs = ep->regs; - is_in = ctrlrequest->wIndex & USB_DIR_IN; - if (is_in) - musb_ep = &ep->ep_in; - else - musb_ep = &ep->ep_out; - if (!musb_ep->desc) - break; - - musb_ep_select(mbase, epnum); - if (is_in) { - csr = musb_readw(regs, - MUSB_TXCSR); - if (csr & MUSB_TXCSR_FIFONOTEMPTY) - csr |= MUSB_TXCSR_FLUSHFIFO; - csr |= MUSB_TXCSR_P_SENDSTALL - | MUSB_TXCSR_CLRDATATOG - | MUSB_TXCSR_P_WZC_BITS; - musb_writew(regs, MUSB_TXCSR, - csr); - } else { - csr = musb_readw(regs, - MUSB_RXCSR); - csr |= MUSB_RXCSR_P_SENDSTALL - | MUSB_RXCSR_FLUSHFIFO - | MUSB_RXCSR_CLRDATATOG - | MUSB_TXCSR_P_WZC_BITS; - musb_writew(regs, MUSB_RXCSR, - csr); - } - - /* select ep0 again */ - musb_ep_select(mbase, 0); - handled = 1; - } break; - - default: - /* class, vendor, etc ... delegate */ - handled = 0; - break; - } - break; - default: - /* delegate SET_CONFIGURATION, etc */ - handled = 0; - } - } else - handled = 0; - return handled; -} - -/* we have an ep0out data packet - * Context: caller holds controller lock - */ -static void ep0_rxstate(struct musb *musb) -{ - void __iomem *regs = musb->control_ep->regs; - struct usb_request *req; - u16 tmp; - - req = next_ep0_request(musb); - - /* read packet and ack; or stall because of gadget driver bug: - * should have provided the rx buffer before setup() returned. 
- */ - if (req) { - void *buf = req->buf + req->actual; - unsigned len = req->length - req->actual; - - /* read the buffer */ - tmp = musb_readb(regs, MUSB_COUNT0); - if (tmp > len) { - req->status = -EOVERFLOW; - tmp = len; - } - musb_read_fifo(&musb->endpoints[0], tmp, buf); - req->actual += tmp; - tmp = MUSB_CSR0_P_SVDRXPKTRDY; - if (tmp < 64 || req->actual == req->length) { - musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; - tmp |= MUSB_CSR0_P_DATAEND; - } else - req = NULL; - } else - tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL; - - - /* Completion handler may choose to stall, e.g. because the - * message just received holds invalid data. - */ - if (req) { - musb->ackpend = tmp; - musb_g_ep0_giveback(musb, req); - if (!musb->ackpend) - return; - musb->ackpend = 0; - } - musb_writew(regs, MUSB_CSR0, tmp); -} - -/* - * transmitting to the host (IN), this code might be called from IRQ - * and from kernel thread. - * - * Context: caller holds controller lock - */ -static void ep0_txstate(struct musb *musb) -{ - void __iomem *regs = musb->control_ep->regs; - struct usb_request *request = next_ep0_request(musb); - u16 csr = MUSB_CSR0_TXPKTRDY; - u8 *fifo_src; - u8 fifo_count; - - if (!request) { - /* WARN_ON(1); */ - DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); - return; - } - - /* load the data */ - fifo_src = (u8 *) request->buf + request->actual; - fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, - request->length - request->actual); - musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src); - request->actual += fifo_count; - - /* update the flags */ - if (fifo_count < MUSB_MAX_END0_PACKET - || request->actual == request->length) { - musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; - csr |= MUSB_CSR0_P_DATAEND; - } else - request = NULL; - - /* report completions as soon as the fifo's loaded; there's no - * win in waiting till this last packet gets acked. (other than - * very precise fault reporting, needed by USB TMC; possible with - * this hardware, but not usable from portable gadget drivers.) - */ - if (request) { - musb->ackpend = csr; - musb_g_ep0_giveback(musb, request); - if (!musb->ackpend) - return; - musb->ackpend = 0; - } - - /* send it out, triggering a "txpktrdy cleared" irq */ - musb_writew(regs, MUSB_CSR0, csr); -} - -/* - * Read a SETUP packet (struct usb_ctrlrequest) from the hardware. - * Fields are left in USB byte-order. - * - * Context: caller holds controller lock. - */ -static void -musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) -{ - struct usb_request *r; - void __iomem *regs = musb->control_ep->regs; - - musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); - - /* NOTE: earlier 2.6 versions changed setup packets to host - * order, but now USB packets always stay in USB byte order. - */ - DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n", - req->bRequestType, - req->bRequest, - le16_to_cpu(req->wValue), - le16_to_cpu(req->wIndex), - le16_to_cpu(req->wLength)); - - /* clean up any leftover transfers */ - r = next_ep0_request(musb); - if (r) - musb_g_ep0_giveback(musb, r); - - /* For zero-data requests we want to delay the STATUS stage to - * avoid SETUPEND errors. If we read data (OUT), delay accepting - * packets until there's a buffer to store them in. - * - * If we write data, the controller acts happier if we enable - * the TX FIFO right away, and give the controller a moment - * to switch modes... 
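As the surrounding comments describe, SETUP packets are read with their 16-bit fields left in USB byte order, and anything the chip-level code cannot handle itself is forwarded to the gadget driver's setup() callback. A minimal sketch of such a callback (illustrative only; the function name is hypothetical and this is not code from this patch):

#include <linux/kernel.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

static int example_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* the 16-bit fields arrive in USB (little-endian) byte order */
	u16 w_length = le16_to_cpu(ctrl->wLength);

	switch (ctrl->bRequest) {
	case USB_REQ_GET_DESCRIPTOR:
		/* a real driver builds the descriptor here and queues at
		 * most w_length bytes on ep0 with usb_ep_queue() */
		return w_length ? 0 : -EOPNOTSUPP;
	default:
		/* a negative return makes the controller driver stall ep0,
		 * matching the handled < 0 path in musb_g_ep0_irq() */
		return -EOPNOTSUPP;
	}
}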
- */ - musb->set_address = false; - musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY; - if (req->wLength == 0) { - if (req->bRequestType & USB_DIR_IN) - musb->ackpend |= MUSB_CSR0_TXPKTRDY; - musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT; - } else if (req->bRequestType & USB_DIR_IN) { - musb->ep0_state = MUSB_EP0_STAGE_TX; - musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY); - while ((musb_readw(regs, MUSB_CSR0) - & MUSB_CSR0_RXPKTRDY) != 0) - cpu_relax(); - musb->ackpend = 0; - } else - musb->ep0_state = MUSB_EP0_STAGE_RX; -} - -static int -forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) -__releases(musb->lock) -__acquires(musb->lock) -{ - int retval; - if (!musb->gadget_driver) - return -EOPNOTSUPP; - spin_unlock(&musb->lock); - retval = musb->gadget_driver->setup(&musb->g, ctrlrequest); - spin_lock(&musb->lock); - return retval; -} - -/* - * Handle peripheral ep0 interrupt - * - * Context: irq handler; we won't re-enter the driver that way. - */ -irqreturn_t musb_g_ep0_irq(struct musb *musb) -{ - u16 csr; - u16 len; - void __iomem *mbase = musb->mregs; - void __iomem *regs = musb->endpoints[0].regs; - irqreturn_t retval = IRQ_NONE; - - musb_ep_select(mbase, 0); /* select ep0 */ - csr = musb_readw(regs, MUSB_CSR0); - len = musb_readb(regs, MUSB_COUNT0); - - DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n", - csr, len, - musb_readb(mbase, MUSB_FADDR), - decode_ep0stage(musb->ep0_state)); - - /* I sent a stall.. need to acknowledge it now.. */ - if (csr & MUSB_CSR0_P_SENTSTALL) { - musb_writew(regs, MUSB_CSR0, - csr & ~MUSB_CSR0_P_SENTSTALL); - retval = IRQ_HANDLED; - musb->ep0_state = MUSB_EP0_STAGE_SETUP; - csr = musb_readw(regs, MUSB_CSR0); - } - - /* request ended "early" */ - if (csr & MUSB_CSR0_P_SETUPEND) { - musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND); - retval = IRQ_HANDLED; - musb->ep0_state = MUSB_EP0_STAGE_SETUP; - csr = musb_readw(regs, MUSB_CSR0); - /* NOTE: request may need completion */ - } - - /* docs from Mentor only describe tx, rx, and idle/setup states. - * we need to handle nuances around status stages, and also the - * case where status and setup stages come back-to-back ... - */ - switch (musb->ep0_state) { - - case MUSB_EP0_STAGE_TX: - /* irq on clearing txpktrdy */ - if ((csr & MUSB_CSR0_TXPKTRDY) == 0) { - ep0_txstate(musb); - retval = IRQ_HANDLED; - } - break; - - case MUSB_EP0_STAGE_RX: - /* irq on set rxpktrdy */ - if (csr & MUSB_CSR0_RXPKTRDY) { - ep0_rxstate(musb); - retval = IRQ_HANDLED; - } - break; - - case MUSB_EP0_STAGE_STATUSIN: - /* end of sequence #2 (OUT/RX state) or #3 (no data) */ - - /* update address (if needed) only @ the end of the - * status phase per usb spec, which also guarantees - * we get 10 msec to receive this irq... until this - * is done we won't see the next packet. 
- */ - if (musb->set_address) { - musb->set_address = false; - musb_writeb(mbase, MUSB_FADDR, musb->address); - } - - /* enter test mode if needed (exit by reset) */ - else if (musb->test_mode) { - DBG(1, "entering TESTMODE\n"); - - if (MUSB_TEST_PACKET == musb->test_mode_nr) - musb_load_testpacket(musb); - - musb_writeb(mbase, MUSB_TESTMODE, - musb->test_mode_nr); - } - /* FALLTHROUGH */ - - case MUSB_EP0_STAGE_STATUSOUT: - /* end of sequence #1: write to host (TX state) */ - { - struct usb_request *req; - - req = next_ep0_request(musb); - if (req) - musb_g_ep0_giveback(musb, req); - } - retval = IRQ_HANDLED; - musb->ep0_state = MUSB_EP0_STAGE_SETUP; - /* FALLTHROUGH */ - - case MUSB_EP0_STAGE_SETUP: - if (csr & MUSB_CSR0_RXPKTRDY) { - struct usb_ctrlrequest setup; - int handled = 0; - - if (len != 8) { - ERR("SETUP packet len %d != 8 ?\n", len); - break; - } - musb_read_setup(musb, &setup); - retval = IRQ_HANDLED; - - /* sometimes the RESET won't be reported */ - if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) { - u8 power; - - printk(KERN_NOTICE "%s: peripheral reset " - "irq lost!\n", - musb_driver_name); - power = musb_readb(mbase, MUSB_POWER); - musb->g.speed = (power & MUSB_POWER_HSMODE) - ? USB_SPEED_HIGH : USB_SPEED_FULL; - - } - - switch (musb->ep0_state) { - - /* sequence #3 (no data stage), includes requests - * we can't forward (notably SET_ADDRESS and the - * device/endpoint feature set/clear operations) - * plus SET_CONFIGURATION and others we must - */ - case MUSB_EP0_STAGE_ACKWAIT: - handled = service_zero_data_request( - musb, &setup); - - /* status stage might be immediate */ - if (handled > 0) { - musb->ackpend |= MUSB_CSR0_P_DATAEND; - musb->ep0_state = - MUSB_EP0_STAGE_STATUSIN; - } - break; - - /* sequence #1 (IN to host), includes GET_STATUS - * requests that we can't forward, GET_DESCRIPTOR - * and others that we must - */ - case MUSB_EP0_STAGE_TX: - handled = service_in_request(musb, &setup); - if (handled > 0) { - musb->ackpend = MUSB_CSR0_TXPKTRDY - | MUSB_CSR0_P_DATAEND; - musb->ep0_state = - MUSB_EP0_STAGE_STATUSOUT; - } - break; - - /* sequence #2 (OUT from host), always forward */ - default: /* MUSB_EP0_STAGE_RX */ - break; - } - - DBG(3, "handled %d, csr %04x, ep0stage %s\n", - handled, csr, - decode_ep0stage(musb->ep0_state)); - - /* unless we need to delegate this to the gadget - * driver, we know how to wrap this up: csr0 has - * not yet been written. - */ - if (handled < 0) - goto stall; - else if (handled > 0) - goto finish; - - handled = forward_to_driver(musb, &setup); - if (handled < 0) { - musb_ep_select(mbase, 0); -stall: - DBG(3, "stall (%d)\n", handled); - musb->ackpend |= MUSB_CSR0_P_SENDSTALL; - musb->ep0_state = MUSB_EP0_STAGE_SETUP; -finish: - musb_writew(regs, MUSB_CSR0, - musb->ackpend); - musb->ackpend = 0; - } - } - break; - - case MUSB_EP0_STAGE_ACKWAIT: - /* This should not happen. But happens with tusb6010 with - * g_file_storage and high speed. Do nothing. 
- */ - retval = IRQ_HANDLED; - break; - - default: - /* "can't happen" */ - WARN_ON(1); - musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); - musb->ep0_state = MUSB_EP0_STAGE_SETUP; - break; - } - - return retval; -} - - -static int -musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) -{ - /* always enabled */ - return -EINVAL; -} - -static int musb_g_ep0_disable(struct usb_ep *e) -{ - /* always enabled */ - return -EINVAL; -} - -static int -musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) -{ - struct musb_ep *ep; - struct musb_request *req; - struct musb *musb; - int status; - unsigned long lockflags; - void __iomem *regs; - - if (!e || !r) - return -EINVAL; - - ep = to_musb_ep(e); - musb = ep->musb; - regs = musb->control_ep->regs; - - req = to_musb_request(r); - req->musb = musb; - req->request.actual = 0; - req->request.status = -EINPROGRESS; - req->tx = ep->is_in; - - spin_lock_irqsave(&musb->lock, lockflags); - - if (!list_empty(&ep->req_list)) { - status = -EBUSY; - goto cleanup; - } - - switch (musb->ep0_state) { - case MUSB_EP0_STAGE_RX: /* control-OUT data */ - case MUSB_EP0_STAGE_TX: /* control-IN data */ - case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */ - status = 0; - break; - default: - DBG(1, "ep0 request queued in state %d\n", - musb->ep0_state); - status = -EINVAL; - goto cleanup; - } - - /* add request to the list */ - list_add_tail(&(req->request.list), &(ep->req_list)); - - DBG(3, "queue to %s (%s), length=%d\n", - ep->name, ep->is_in ? "IN/TX" : "OUT/RX", - req->request.length); - - musb_ep_select(musb->mregs, 0); - - /* sequence #1, IN ... start writing the data */ - if (musb->ep0_state == MUSB_EP0_STAGE_TX) - ep0_txstate(musb); - - /* sequence #3, no-data ... issue IN status */ - else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) { - if (req->request.length) - status = -EINVAL; - else { - musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; - musb_writew(regs, MUSB_CSR0, - musb->ackpend | MUSB_CSR0_P_DATAEND); - musb->ackpend = 0; - musb_g_ep0_giveback(ep->musb, r); - } - - /* else for sequence #2 (OUT), caller provides a buffer - * before the next packet arrives. deferred responses - * (after SETUP is acked) are racey. - */ - } else if (musb->ackpend) { - musb_writew(regs, MUSB_CSR0, musb->ackpend); - musb->ackpend = 0; - } - -cleanup: - spin_unlock_irqrestore(&musb->lock, lockflags); - return status; -} - -static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) -{ - /* we just won't support this */ - return -EINVAL; -} - -static int musb_g_ep0_halt(struct usb_ep *e, int value) -{ - struct musb_ep *ep; - struct musb *musb; - void __iomem *base, *regs; - unsigned long flags; - int status; - u16 csr; - - if (!e || !value) - return -EINVAL; - - ep = to_musb_ep(e); - musb = ep->musb; - base = musb->mregs; - regs = musb->control_ep->regs; - status = 0; - - spin_lock_irqsave(&musb->lock, flags); - - if (!list_empty(&ep->req_list)) { - status = -EBUSY; - goto cleanup; - } - - musb_ep_select(base, 0); - csr = musb->ackpend; - - switch (musb->ep0_state) { - - /* Stalls are usually issued after parsing SETUP packet, either - * directly in irq context from setup() or else later. 
- */ - case MUSB_EP0_STAGE_TX: /* control-IN data */ - case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */ - case MUSB_EP0_STAGE_RX: /* control-OUT data */ - csr = musb_readw(regs, MUSB_CSR0); - /* FALLTHROUGH */ - - /* It's also OK to issue stalls during callbacks when a non-empty - * DATA stage buffer has been read (or even written). - */ - case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */ - case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */ - - csr |= MUSB_CSR0_P_SENDSTALL; - musb_writew(regs, MUSB_CSR0, csr); - musb->ep0_state = MUSB_EP0_STAGE_SETUP; - musb->ackpend = 0; - break; - default: - DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state); - status = -EINVAL; - } - -cleanup: - spin_unlock_irqrestore(&musb->lock, flags); - return status; -} - -const struct usb_ep_ops musb_g_ep0_ops = { - .enable = musb_g_ep0_enable, - .disable = musb_g_ep0_disable, - .alloc_request = musb_alloc_request, - .free_request = musb_free_request, - .queue = musb_g_ep0_queue, - .dequeue = musb_g_ep0_dequeue, - .set_halt = musb_g_ep0_halt, -}; diff --git a/trunk/drivers/usb/musb/musb_host.c b/trunk/drivers/usb/musb/musb_host.c deleted file mode 100644 index 8b4be012669a..000000000000 --- a/trunk/drivers/usb/musb/musb_host.c +++ /dev/null @@ -1,2170 +0,0 @@ -/* - * MUSB OTG driver host support - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "musb_core.h" -#include "musb_host.h" - - -/* MUSB HOST status 22-mar-2006 - * - * - There's still lots of partial code duplication for fault paths, so - * they aren't handled as consistently as they need to be. - * - * - PIO mostly behaved when last tested. - * + including ep0, with all usbtest cases 9, 10 - * + usbtest 14 (ep0out) doesn't seem to run at all - * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest - * configurations, but otherwise double buffering passes basic tests. - * + for 2.6.N, for N > ~10, needs API changes for hcd framework. 
- * - * - DMA (CPPI) ... partially behaves, not currently recommended - * + about 1/15 the speed of typical EHCI implementations (PCI) - * + RX, all too often reqpkt seems to misbehave after tx - * + TX, no known issues (other than evident silicon issue) - * - * - DMA (Mentor/OMAP) ...has at least toggle update problems - * - * - Still no traffic scheduling code to make NAKing for bulk or control - * transfers unable to starve other requests; or to make efficient use - * of hardware with periodic transfers. (Note that network drivers - * commonly post bulk reads that stay pending for a long time; these - * would make very visible trouble.) - * - * - Not tested with HNP, but some SRP paths seem to behave. - * - * NOTE 24-August-2006: - * - * - Bulk traffic finally uses both sides of hardware ep1, freeing up an - * extra endpoint for periodic use enabling hub + keybd + mouse. That - * mostly works, except that with "usbnet" it's easy to trigger cases - * with "ping" where RX loses. (a) ping to davinci, even "ping -f", - * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses - * although ARP RX wins. (That test was done with a full speed link.) - */ - - -/* - * NOTE on endpoint usage: - * - * CONTROL transfers all go through ep0. BULK ones go through dedicated IN - * and OUT endpoints ... hardware is dedicated for those "async" queue(s). - * - * (Yes, bulk _could_ use more of the endpoints than that, and would even - * benefit from it ... one remote device may easily be NAKing while others - * need to perform transfers in that same direction. The same thing could - * be done in software though, assuming dma cooperates.) - * - * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints. - * So far that scheduling is both dumb and optimistic: the endpoint will be - * "claimed" until its software queue is no longer refilled. No multiplexing - * of transfers between endpoints, or anything clever. - */ - - -static void musb_ep_program(struct musb *musb, u8 epnum, - struct urb *urb, unsigned int nOut, - u8 *buf, u32 len); - -/* - * Clear TX fifo. Needed to avoid BABBLE errors. - */ -static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) -{ - void __iomem *epio = ep->regs; - u16 csr; - int retries = 1000; - - csr = musb_readw(epio, MUSB_TXCSR); - while (csr & MUSB_TXCSR_FIFONOTEMPTY) { - DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr); - csr |= MUSB_TXCSR_FLUSHFIFO; - musb_writew(epio, MUSB_TXCSR, csr); - csr = musb_readw(epio, MUSB_TXCSR); - if (retries-- < 1) { - ERR("Could not flush host TX fifo: csr: %04x\n", csr); - return; - } - mdelay(1); - } -} - -/* - * Start transmit. Caller is responsible for locking shared resources. - * musb must be locked. - */ -static inline void musb_h_tx_start(struct musb_hw_ep *ep) -{ - u16 txcsr; - - /* NOTE: no locks here; caller should lock and select EP */ - if (ep->epnum) { - txcsr = musb_readw(ep->regs, MUSB_TXCSR); - txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; - musb_writew(ep->regs, MUSB_TXCSR, txcsr); - } else { - txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; - musb_writew(ep->regs, MUSB_CSR0, txcsr); - } - -} - -static inline void cppi_host_txdma_start(struct musb_hw_ep *ep) -{ - u16 txcsr; - - /* NOTE: no locks here; caller should lock and select EP */ - txcsr = musb_readw(ep->regs, MUSB_TXCSR); - txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; - musb_writew(ep->regs, MUSB_TXCSR, txcsr); -} - -/* - * Start the URB at the front of an endpoint's queue - * end must be claimed from the caller. 
- * - * Context: controller locked, irqs blocked - */ -static void -musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) -{ - u16 frame; - u32 len; - void *buf; - void __iomem *mbase = musb->mregs; - struct urb *urb = next_urb(qh); - struct musb_hw_ep *hw_ep = qh->hw_ep; - unsigned pipe = urb->pipe; - u8 address = usb_pipedevice(pipe); - int epnum = hw_ep->epnum; - - /* initialize software qh state */ - qh->offset = 0; - qh->segsize = 0; - - /* gather right source of data */ - switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: - /* control transfers always start with SETUP */ - is_in = 0; - hw_ep->out_qh = qh; - musb->ep0_stage = MUSB_EP0_START; - buf = urb->setup_packet; - len = 8; - break; - case USB_ENDPOINT_XFER_ISOC: - qh->iso_idx = 0; - qh->frame = 0; - buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset; - len = urb->iso_frame_desc[0].length; - break; - default: /* bulk, interrupt */ - buf = urb->transfer_buffer; - len = urb->transfer_buffer_length; - } - - DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", - qh, urb, address, qh->epnum, - is_in ? "in" : "out", - ({char *s; switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: s = ""; break; - case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; - case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; - default: s = "-intr"; break; - }; s; }), - epnum, buf, len); - - /* Configure endpoint */ - if (is_in || hw_ep->is_shared_fifo) - hw_ep->in_qh = qh; - else - hw_ep->out_qh = qh; - musb_ep_program(musb, epnum, urb, !is_in, buf, len); - - /* transmit may have more work: start it when it is time */ - if (is_in) - return; - - /* determine if the time is right for a periodic transfer */ - switch (qh->type) { - case USB_ENDPOINT_XFER_ISOC: - case USB_ENDPOINT_XFER_INT: - DBG(3, "check whether there's still time for periodic Tx\n"); - qh->iso_idx = 0; - frame = musb_readw(mbase, MUSB_FRAME); - /* FIXME this doesn't implement that scheduling policy ... - * or handle framecounter wrapping - */ - if ((urb->transfer_flags & URB_ISO_ASAP) - || (frame >= urb->start_frame)) { - /* REVISIT the SOF irq handler shouldn't duplicate - * this code; and we don't init urb->start_frame... - */ - qh->frame = 0; - goto start; - } else { - qh->frame = urb->start_frame; - /* enable SOF interrupt so we can count down */ - DBG(1, "SOF for %d\n", epnum); -#if 1 /* ifndef CONFIG_ARCH_DAVINCI */ - musb_writeb(mbase, MUSB_INTRUSBE, 0xff); -#endif - } - break; - default: -start: - DBG(4, "Start TX%d %s\n", epnum, - hw_ep->tx_channel ? "dma" : "pio"); - - if (!hw_ep->tx_channel) - musb_h_tx_start(hw_ep); - else if (is_cppi_enabled() || tusb_dma_omap()) - cppi_host_txdma_start(hw_ep); - } -} - -/* caller owns controller lock, irqs are blocked */ -static void -__musb_giveback(struct musb *musb, struct urb *urb, int status) -__releases(musb->lock) -__acquires(musb->lock) -{ - DBG(({ int level; switch (urb->status) { - case 0: - level = 4; - break; - /* common/boring faults */ - case -EREMOTEIO: - case -ESHUTDOWN: - case -ECONNRESET: - case -EPIPE: - level = 3; - break; - default: - level = 2; - break; - }; level; }), - "complete %p (%d), dev%d ep%d%s, %d/%d\n", - urb, urb->status, - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? 
"in" : "out", - urb->actual_length, urb->transfer_buffer_length - ); - - spin_unlock(&musb->lock); - usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); - spin_lock(&musb->lock); -} - -/* for bulk/interrupt endpoints only */ -static inline void -musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb) -{ - struct usb_device *udev = urb->dev; - u16 csr; - void __iomem *epio = ep->regs; - struct musb_qh *qh; - - /* FIXME: the current Mentor DMA code seems to have - * problems getting toggle correct. - */ - - if (is_in || ep->is_shared_fifo) - qh = ep->in_qh; - else - qh = ep->out_qh; - - if (!is_in) { - csr = musb_readw(epio, MUSB_TXCSR); - usb_settoggle(udev, qh->epnum, 1, - (csr & MUSB_TXCSR_H_DATATOGGLE) - ? 1 : 0); - } else { - csr = musb_readw(epio, MUSB_RXCSR); - usb_settoggle(udev, qh->epnum, 0, - (csr & MUSB_RXCSR_H_DATATOGGLE) - ? 1 : 0); - } -} - -/* caller owns controller lock, irqs are blocked */ -static struct musb_qh * -musb_giveback(struct musb_qh *qh, struct urb *urb, int status) -{ - int is_in; - struct musb_hw_ep *ep = qh->hw_ep; - struct musb *musb = ep->musb; - int ready = qh->is_ready; - - if (ep->is_shared_fifo) - is_in = 1; - else - is_in = usb_pipein(urb->pipe); - - /* save toggle eagerly, for paranoia */ - switch (qh->type) { - case USB_ENDPOINT_XFER_BULK: - case USB_ENDPOINT_XFER_INT: - musb_save_toggle(ep, is_in, urb); - break; - case USB_ENDPOINT_XFER_ISOC: - if (status == 0 && urb->error_count) - status = -EXDEV; - break; - } - - usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); - - qh->is_ready = 0; - __musb_giveback(musb, urb, status); - qh->is_ready = ready; - - /* reclaim resources (and bandwidth) ASAP; deschedule it, and - * invalidate qh as soon as list_empty(&hep->urb_list) - */ - if (list_empty(&qh->hep->urb_list)) { - struct list_head *head; - - if (is_in) - ep->rx_reinit = 1; - else - ep->tx_reinit = 1; - - /* clobber old pointers to this qh */ - if (is_in || ep->is_shared_fifo) - ep->in_qh = NULL; - else - ep->out_qh = NULL; - qh->hep->hcpriv = NULL; - - switch (qh->type) { - - case USB_ENDPOINT_XFER_ISOC: - case USB_ENDPOINT_XFER_INT: - /* this is where periodic bandwidth should be - * de-allocated if it's tracked and allocated; - * and where we'd update the schedule tree... - */ - musb->periodic[ep->epnum] = NULL; - kfree(qh); - qh = NULL; - break; - - case USB_ENDPOINT_XFER_CONTROL: - case USB_ENDPOINT_XFER_BULK: - /* fifo policy for these lists, except that NAKing - * should rotate a qh to the end (for fairness). - */ - head = qh->ring.prev; - list_del(&qh->ring); - kfree(qh); - qh = first_qh(head); - break; - } - } - return qh; -} - -/* - * Advance this hardware endpoint's queue, completing the specified urb and - * advancing to either the next urb queued to that qh, or else invalidating - * that qh and advancing to the next qh scheduled after the current one. - * - * Context: caller owns controller lock, irqs are blocked - */ -static void -musb_advance_schedule(struct musb *musb, struct urb *urb, - struct musb_hw_ep *hw_ep, int is_in) -{ - struct musb_qh *qh; - - if (is_in || hw_ep->is_shared_fifo) - qh = hw_ep->in_qh; - else - qh = hw_ep->out_qh; - - if (urb->status == -EINPROGRESS) - qh = musb_giveback(qh, urb, 0); - else - qh = musb_giveback(qh, urb, urb->status); - - if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) { - DBG(4, "... next ep%d %cX urb %p\n", - hw_ep->epnum, is_in ? 
'R' : 'T', - next_urb(qh)); - musb_start_urb(musb, is_in, qh); - } -} - -static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) -{ - /* we don't want fifo to fill itself again; - * ignore dma (various models), - * leave toggle alone (may not have been saved yet) - */ - csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; - csr &= ~(MUSB_RXCSR_H_REQPKT - | MUSB_RXCSR_H_AUTOREQ - | MUSB_RXCSR_AUTOCLEAR); - - /* write 2x to allow double buffering */ - musb_writew(hw_ep->regs, MUSB_RXCSR, csr); - musb_writew(hw_ep->regs, MUSB_RXCSR, csr); - - /* flush writebuffer */ - return musb_readw(hw_ep->regs, MUSB_RXCSR); -} - -/* - * PIO RX for a packet (or part of it). - */ -static bool -musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) -{ - u16 rx_count; - u8 *buf; - u16 csr; - bool done = false; - u32 length; - int do_flush = 0; - struct musb_hw_ep *hw_ep = musb->endpoints + epnum; - void __iomem *epio = hw_ep->regs; - struct musb_qh *qh = hw_ep->in_qh; - int pipe = urb->pipe; - void *buffer = urb->transfer_buffer; - - /* musb_ep_select(mbase, epnum); */ - rx_count = musb_readw(epio, MUSB_RXCOUNT); - DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, - urb->transfer_buffer, qh->offset, - urb->transfer_buffer_length); - - /* unload FIFO */ - if (usb_pipeisoc(pipe)) { - int status = 0; - struct usb_iso_packet_descriptor *d; - - if (iso_err) { - status = -EILSEQ; - urb->error_count++; - } - - d = urb->iso_frame_desc + qh->iso_idx; - buf = buffer + d->offset; - length = d->length; - if (rx_count > length) { - if (status == 0) { - status = -EOVERFLOW; - urb->error_count++; - } - DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); - do_flush = 1; - } else - length = rx_count; - urb->actual_length += length; - d->actual_length = length; - - d->status = status; - - /* see if we are done */ - done = (++qh->iso_idx >= urb->number_of_packets); - } else { - /* non-isoch */ - buf = buffer + qh->offset; - length = urb->transfer_buffer_length - qh->offset; - if (rx_count > length) { - if (urb->status == -EINPROGRESS) - urb->status = -EOVERFLOW; - DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); - do_flush = 1; - } else - length = rx_count; - urb->actual_length += length; - qh->offset += length; - - /* see if we are done */ - done = (urb->actual_length == urb->transfer_buffer_length) - || (rx_count < qh->maxpacket) - || (urb->status != -EINPROGRESS); - if (done - && (urb->status == -EINPROGRESS) - && (urb->transfer_flags & URB_SHORT_NOT_OK) - && (urb->actual_length - < urb->transfer_buffer_length)) - urb->status = -EREMOTEIO; - } - - musb_read_fifo(hw_ep, length, buf); - - csr = musb_readw(epio, MUSB_RXCSR); - csr |= MUSB_RXCSR_H_WZC_BITS; - if (unlikely(do_flush)) - musb_h_flush_rxfifo(hw_ep, csr); - else { - /* REVISIT this assumes AUTOCLEAR is never set */ - csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); - if (!done) - csr |= MUSB_RXCSR_H_REQPKT; - musb_writew(epio, MUSB_RXCSR, csr); - } - - return done; -} - -/* we don't always need to reinit a given side of an endpoint... - * when we do, use tx/rx reinit routine and then construct a new CSR - * to address data toggle, NYET, and DMA or PIO. - * - * it's possible that driver bugs (especially for DMA) or aborting a - * transfer might have left the endpoint busier than it should be. - * the busy/not-empty tests are basically paranoia. 
- */ -static void -musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) -{ - u16 csr; - - /* NOTE: we know the "rx" fifo reinit never triggers for ep0. - * That always uses tx_reinit since ep0 repurposes TX register - * offsets; the initial SETUP packet is also a kind of OUT. - */ - - /* if programmed for Tx, put it in RX mode */ - if (ep->is_shared_fifo) { - csr = musb_readw(ep->regs, MUSB_TXCSR); - if (csr & MUSB_TXCSR_MODE) { - musb_h_tx_flush_fifo(ep); - musb_writew(ep->regs, MUSB_TXCSR, - MUSB_TXCSR_FRCDATATOG); - } - /* clear mode (and everything else) to enable Rx */ - musb_writew(ep->regs, MUSB_TXCSR, 0); - - /* scrub all previous state, clearing toggle */ - } else { - csr = musb_readw(ep->regs, MUSB_RXCSR); - if (csr & MUSB_RXCSR_RXPKTRDY) - WARNING("rx%d, packet/%d ready?\n", ep->epnum, - musb_readw(ep->regs, MUSB_RXCOUNT)); - - musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); - } - - /* target addr and (for multipoint) hub addr/port */ - if (musb->is_multipoint) { - musb_writeb(ep->target_regs, MUSB_RXFUNCADDR, - qh->addr_reg); - musb_writeb(ep->target_regs, MUSB_RXHUBADDR, - qh->h_addr_reg); - musb_writeb(ep->target_regs, MUSB_RXHUBPORT, - qh->h_port_reg); - } else - musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); - - /* protocol/endpoint, interval/NAKlimit, i/o size */ - musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); - musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); - /* NOTE: bulk combining rewrites high bits of maxpacket */ - musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket); - - ep->rx_reinit = 0; -} - - -/* - * Program an HDRC endpoint as per the given URB - * Context: irqs blocked, controller lock held - */ -static void musb_ep_program(struct musb *musb, u8 epnum, - struct urb *urb, unsigned int is_out, - u8 *buf, u32 len) -{ - struct dma_controller *dma_controller; - struct dma_channel *dma_channel; - u8 dma_ok; - void __iomem *mbase = musb->mregs; - struct musb_hw_ep *hw_ep = musb->endpoints + epnum; - void __iomem *epio = hw_ep->regs; - struct musb_qh *qh; - u16 packet_sz; - - if (!is_out || hw_ep->is_shared_fifo) - qh = hw_ep->in_qh; - else - qh = hw_ep->out_qh; - - packet_sz = qh->maxpacket; - - DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " - "h_addr%02x h_port%02x bytes %d\n", - is_out ? "-->" : "<--", - epnum, urb, urb->dev->speed, - qh->addr_reg, qh->epnum, is_out ? "out" : "in", - qh->h_addr_reg, qh->h_port_reg, - len); - - musb_ep_select(mbase, epnum); - - /* candidate for DMA? */ - dma_controller = musb->dma_controller; - if (is_dma_capable() && epnum && dma_controller) { - dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; - if (!dma_channel) { - dma_channel = dma_controller->channel_alloc( - dma_controller, hw_ep, is_out); - if (is_out) - hw_ep->tx_channel = dma_channel; - else - hw_ep->rx_channel = dma_channel; - } - } else - dma_channel = NULL; - - /* make sure we clear DMAEnab, autoSet bits from previous run */ - - /* OUT/transmit/EP0 or IN/receive? 
*/ - if (is_out) { - u16 csr; - u16 int_txe; - u16 load_count; - - csr = musb_readw(epio, MUSB_TXCSR); - - /* disable interrupt in case we flush */ - int_txe = musb_readw(mbase, MUSB_INTRTXE); - musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); - - /* general endpoint setup */ - if (epnum) { - /* ASSERT: TXCSR_DMAENAB was already cleared */ - - /* flush all old state, set default */ - musb_h_tx_flush_fifo(hw_ep); - csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT - | MUSB_TXCSR_DMAMODE - | MUSB_TXCSR_FRCDATATOG - | MUSB_TXCSR_H_RXSTALL - | MUSB_TXCSR_H_ERROR - | MUSB_TXCSR_TXPKTRDY - ); - csr |= MUSB_TXCSR_MODE; - - if (usb_gettoggle(urb->dev, - qh->epnum, 1)) - csr |= MUSB_TXCSR_H_WR_DATATOGGLE - | MUSB_TXCSR_H_DATATOGGLE; - else - csr |= MUSB_TXCSR_CLRDATATOG; - - /* twice in case of double packet buffering */ - musb_writew(epio, MUSB_TXCSR, csr); - /* REVISIT may need to clear FLUSHFIFO ... */ - musb_writew(epio, MUSB_TXCSR, csr); - csr = musb_readw(epio, MUSB_TXCSR); - } else { - /* endpoint 0: just flush */ - musb_writew(epio, MUSB_CSR0, - csr | MUSB_CSR0_FLUSHFIFO); - musb_writew(epio, MUSB_CSR0, - csr | MUSB_CSR0_FLUSHFIFO); - } - - /* target addr and (for multipoint) hub addr/port */ - if (musb->is_multipoint) { - musb_writeb(mbase, - MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR), - qh->addr_reg); - musb_writeb(mbase, - MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR), - qh->h_addr_reg); - musb_writeb(mbase, - MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT), - qh->h_port_reg); -/* FIXME if !epnum, do the same for RX ... */ - } else - musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); - - /* protocol/endpoint/interval/NAKlimit */ - if (epnum) { - musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); - if (can_bulk_split(musb, qh->type)) - musb_writew(epio, MUSB_TXMAXP, - packet_sz - | ((hw_ep->max_packet_sz_tx / - packet_sz) - 1) << 11); - else - musb_writew(epio, MUSB_TXMAXP, - packet_sz); - musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); - } else { - musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); - if (musb->is_multipoint) - musb_writeb(epio, MUSB_TYPE0, - qh->type_reg); - } - - if (can_bulk_split(musb, qh->type)) - load_count = min((u32) hw_ep->max_packet_sz_tx, - len); - else - load_count = min((u32) packet_sz, len); - -#ifdef CONFIG_USB_INVENTRA_DMA - if (dma_channel) { - - /* clear previous state */ - csr = musb_readw(epio, MUSB_TXCSR); - csr &= ~(MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAMODE - | MUSB_TXCSR_DMAENAB); - csr |= MUSB_TXCSR_MODE; - musb_writew(epio, MUSB_TXCSR, - csr | MUSB_TXCSR_MODE); - - qh->segsize = min(len, dma_channel->max_len); - - if (qh->segsize <= packet_sz) - dma_channel->desired_mode = 0; - else - dma_channel->desired_mode = 1; - - - if (dma_channel->desired_mode == 0) { - csr &= ~(MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAMODE); - csr |= (MUSB_TXCSR_DMAENAB); - /* against programming guide */ - } else - csr |= (MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAENAB - | MUSB_TXCSR_DMAMODE); - - musb_writew(epio, MUSB_TXCSR, csr); - - dma_ok = dma_controller->channel_program( - dma_channel, packet_sz, - dma_channel->desired_mode, - urb->transfer_dma, - qh->segsize); - if (dma_ok) { - load_count = 0; - } else { - dma_controller->channel_release(dma_channel); - if (is_out) - hw_ep->tx_channel = NULL; - else - hw_ep->rx_channel = NULL; - dma_channel = NULL; - } - } -#endif - - /* candidate for DMA */ - if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { - - /* program endpoint CSRs first, then setup DMA. - * assume CPPI setup succeeds. - * defer enabling dma. 
- */ - csr = musb_readw(epio, MUSB_TXCSR); - csr &= ~(MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAMODE - | MUSB_TXCSR_DMAENAB); - csr |= MUSB_TXCSR_MODE; - musb_writew(epio, MUSB_TXCSR, - csr | MUSB_TXCSR_MODE); - - dma_channel->actual_len = 0L; - qh->segsize = len; - - /* TX uses "rndis" mode automatically, but needs help - * to identify the zero-length-final-packet case. - */ - dma_ok = dma_controller->channel_program( - dma_channel, packet_sz, - (urb->transfer_flags - & URB_ZERO_PACKET) - == URB_ZERO_PACKET, - urb->transfer_dma, - qh->segsize); - if (dma_ok) { - load_count = 0; - } else { - dma_controller->channel_release(dma_channel); - hw_ep->tx_channel = NULL; - dma_channel = NULL; - - /* REVISIT there's an error path here that - * needs handling: can't do dma, but - * there's no pio buffer address... - */ - } - } - - if (load_count) { - /* ASSERT: TXCSR_DMAENAB was already cleared */ - - /* PIO to load FIFO */ - qh->segsize = load_count; - musb_write_fifo(hw_ep, load_count, buf); - csr = musb_readw(epio, MUSB_TXCSR); - csr &= ~(MUSB_TXCSR_DMAENAB - | MUSB_TXCSR_DMAMODE - | MUSB_TXCSR_AUTOSET); - /* write CSR */ - csr |= MUSB_TXCSR_MODE; - - if (epnum) - musb_writew(epio, MUSB_TXCSR, csr); - } - - /* re-enable interrupt */ - musb_writew(mbase, MUSB_INTRTXE, int_txe); - - /* IN/receive */ - } else { - u16 csr; - - if (hw_ep->rx_reinit) { - musb_rx_reinit(musb, qh, hw_ep); - - /* init new state: toggle and NYET, maybe DMA later */ - if (usb_gettoggle(urb->dev, qh->epnum, 0)) - csr = MUSB_RXCSR_H_WR_DATATOGGLE - | MUSB_RXCSR_H_DATATOGGLE; - else - csr = 0; - if (qh->type == USB_ENDPOINT_XFER_INT) - csr |= MUSB_RXCSR_DISNYET; - - } else { - csr = musb_readw(hw_ep->regs, MUSB_RXCSR); - - if (csr & (MUSB_RXCSR_RXPKTRDY - | MUSB_RXCSR_DMAENAB - | MUSB_RXCSR_H_REQPKT)) - ERR("broken !rx_reinit, ep%d csr %04x\n", - hw_ep->epnum, csr); - - /* scrub any stale state, leaving toggle alone */ - csr &= MUSB_RXCSR_DISNYET; - } - - /* kick things off */ - - if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { - /* candidate for DMA */ - if (dma_channel) { - dma_channel->actual_len = 0L; - qh->segsize = len; - - /* AUTOREQ is in a DMA register */ - musb_writew(hw_ep->regs, MUSB_RXCSR, csr); - csr = musb_readw(hw_ep->regs, - MUSB_RXCSR); - - /* unless caller treats short rx transfers as - * errors, we dare not queue multiple transfers. - */ - dma_ok = dma_controller->channel_program( - dma_channel, packet_sz, - !(urb->transfer_flags - & URB_SHORT_NOT_OK), - urb->transfer_dma, - qh->segsize); - if (!dma_ok) { - dma_controller->channel_release( - dma_channel); - hw_ep->rx_channel = NULL; - dma_channel = NULL; - } else - csr |= MUSB_RXCSR_DMAENAB; - } - } - - csr |= MUSB_RXCSR_H_REQPKT; - DBG(7, "RXCSR%d := %04x\n", epnum, csr); - musb_writew(hw_ep->regs, MUSB_RXCSR, csr); - csr = musb_readw(hw_ep->regs, MUSB_RXCSR); - } -} - - -/* - * Service the default endpoint (ep0) as host. - * Return true until it's time to start the status stage. 
- */ -static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) -{ - bool more = false; - u8 *fifo_dest = NULL; - u16 fifo_count = 0; - struct musb_hw_ep *hw_ep = musb->control_ep; - struct musb_qh *qh = hw_ep->in_qh; - struct usb_ctrlrequest *request; - - switch (musb->ep0_stage) { - case MUSB_EP0_IN: - fifo_dest = urb->transfer_buffer + urb->actual_length; - fifo_count = min(len, ((u16) (urb->transfer_buffer_length - - urb->actual_length))); - if (fifo_count < len) - urb->status = -EOVERFLOW; - - musb_read_fifo(hw_ep, fifo_count, fifo_dest); - - urb->actual_length += fifo_count; - if (len < qh->maxpacket) { - /* always terminate on short read; it's - * rarely reported as an error. - */ - } else if (urb->actual_length < - urb->transfer_buffer_length) - more = true; - break; - case MUSB_EP0_START: - request = (struct usb_ctrlrequest *) urb->setup_packet; - - if (!request->wLength) { - DBG(4, "start no-DATA\n"); - break; - } else if (request->bRequestType & USB_DIR_IN) { - DBG(4, "start IN-DATA\n"); - musb->ep0_stage = MUSB_EP0_IN; - more = true; - break; - } else { - DBG(4, "start OUT-DATA\n"); - musb->ep0_stage = MUSB_EP0_OUT; - more = true; - } - /* FALLTHROUGH */ - case MUSB_EP0_OUT: - fifo_count = min(qh->maxpacket, ((u16) - (urb->transfer_buffer_length - - urb->actual_length))); - - if (fifo_count) { - fifo_dest = (u8 *) (urb->transfer_buffer - + urb->actual_length); - DBG(3, "Sending %d bytes to %p\n", - fifo_count, fifo_dest); - musb_write_fifo(hw_ep, fifo_count, fifo_dest); - - urb->actual_length += fifo_count; - more = true; - } - break; - default: - ERR("bogus ep0 stage %d\n", musb->ep0_stage); - break; - } - - return more; -} - -/* - * Handle default endpoint interrupt as host. Only called in IRQ time - * from the LinuxIsr() interrupt service routine. - * - * called with controller irqlocked - */ -irqreturn_t musb_h_ep0_irq(struct musb *musb) -{ - struct urb *urb; - u16 csr, len; - int status = 0; - void __iomem *mbase = musb->mregs; - struct musb_hw_ep *hw_ep = musb->control_ep; - void __iomem *epio = hw_ep->regs; - struct musb_qh *qh = hw_ep->in_qh; - bool complete = false; - irqreturn_t retval = IRQ_NONE; - - /* ep0 only has one queue, "in" */ - urb = next_urb(qh); - - musb_ep_select(mbase, 0); - csr = musb_readw(epio, MUSB_CSR0); - len = (csr & MUSB_CSR0_RXPKTRDY) - ? musb_readb(epio, MUSB_COUNT0) - : 0; - - DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", - csr, qh, len, urb, musb->ep0_stage); - - /* if we just did status stage, we are done */ - if (MUSB_EP0_STATUS == musb->ep0_stage) { - retval = IRQ_HANDLED; - complete = true; - } - - /* prepare status */ - if (csr & MUSB_CSR0_H_RXSTALL) { - DBG(6, "STALLING ENDPOINT\n"); - status = -EPIPE; - - } else if (csr & MUSB_CSR0_H_ERROR) { - DBG(2, "no response, csr0 %04x\n", csr); - status = -EPROTO; - - } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { - DBG(2, "control NAK timeout\n"); - - /* NOTE: this code path would be a good place to PAUSE a - * control transfer, if another one is queued, so that - * ep0 is more likely to stay busy. - * - * if (qh->ring.next != &musb->control), then - * we have a candidate... 
NAKing is *NOT* an error - */ - musb_writew(epio, MUSB_CSR0, 0); - retval = IRQ_HANDLED; - } - - if (status) { - DBG(6, "aborting\n"); - retval = IRQ_HANDLED; - if (urb) - urb->status = status; - complete = true; - - /* use the proper sequence to abort the transfer */ - if (csr & MUSB_CSR0_H_REQPKT) { - csr &= ~MUSB_CSR0_H_REQPKT; - musb_writew(epio, MUSB_CSR0, csr); - csr &= ~MUSB_CSR0_H_NAKTIMEOUT; - musb_writew(epio, MUSB_CSR0, csr); - } else { - csr |= MUSB_CSR0_FLUSHFIFO; - musb_writew(epio, MUSB_CSR0, csr); - musb_writew(epio, MUSB_CSR0, csr); - csr &= ~MUSB_CSR0_H_NAKTIMEOUT; - musb_writew(epio, MUSB_CSR0, csr); - } - - musb_writeb(epio, MUSB_NAKLIMIT0, 0); - - /* clear it */ - musb_writew(epio, MUSB_CSR0, 0); - } - - if (unlikely(!urb)) { - /* stop endpoint since we have no place for its data, this - * SHOULD NEVER HAPPEN! */ - ERR("no URB for end 0\n"); - - musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); - musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); - musb_writew(epio, MUSB_CSR0, 0); - - goto done; - } - - if (!complete) { - /* call common logic and prepare response */ - if (musb_h_ep0_continue(musb, len, urb)) { - /* more packets required */ - csr = (MUSB_EP0_IN == musb->ep0_stage) - ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; - } else { - /* data transfer complete; perform status phase */ - if (usb_pipeout(urb->pipe) - || !urb->transfer_buffer_length) - csr = MUSB_CSR0_H_STATUSPKT - | MUSB_CSR0_H_REQPKT; - else - csr = MUSB_CSR0_H_STATUSPKT - | MUSB_CSR0_TXPKTRDY; - - /* flag status stage */ - musb->ep0_stage = MUSB_EP0_STATUS; - - DBG(5, "ep0 STATUS, csr %04x\n", csr); - - } - musb_writew(epio, MUSB_CSR0, csr); - retval = IRQ_HANDLED; - } else - musb->ep0_stage = MUSB_EP0_IDLE; - - /* call completion handler if done */ - if (complete) - musb_advance_schedule(musb, urb, hw_ep, 1); -done: - return retval; -} - - -#ifdef CONFIG_USB_INVENTRA_DMA - -/* Host side TX (OUT) using Mentor DMA works as follows: - submit_urb -> - - if queue was empty, Program Endpoint - - ... which starts DMA to fifo in mode 1 or 0 - - DMA Isr (transfer complete) -> TxAvail() - - Stop DMA (~DmaEnab) (<--- Alert ... currently happens - only in musb_cleanup_urb) - - TxPktRdy has to be set in mode 0 or for - short packets in mode 1. -*/ - -#endif - -/* Service a Tx-Available or dma completion irq for the endpoint */ -void musb_host_tx(struct musb *musb, u8 epnum) -{ - int pipe; - bool done = false; - u16 tx_csr; - size_t wLength = 0; - u8 *buf = NULL; - struct urb *urb; - struct musb_hw_ep *hw_ep = musb->endpoints + epnum; - void __iomem *epio = hw_ep->regs; - struct musb_qh *qh = hw_ep->out_qh; - u32 status = 0; - void __iomem *mbase = musb->mregs; - struct dma_channel *dma; - - urb = next_urb(qh); - - musb_ep_select(mbase, epnum); - tx_csr = musb_readw(epio, MUSB_TXCSR); - - /* with CPPI, DMA sometimes triggers "extra" irqs */ - if (!urb) { - DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); - goto finish; - } - - pipe = urb->pipe; - dma = is_dma_capable() ? hw_ep->tx_channel : NULL; - DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, - dma ? 
", dma" : ""); - - /* check for errors */ - if (tx_csr & MUSB_TXCSR_H_RXSTALL) { - /* dma was disabled, fifo flushed */ - DBG(3, "TX end %d stall\n", epnum); - - /* stall; record URB status */ - status = -EPIPE; - - } else if (tx_csr & MUSB_TXCSR_H_ERROR) { - /* (NON-ISO) dma was disabled, fifo flushed */ - DBG(3, "TX 3strikes on ep=%d\n", epnum); - - status = -ETIMEDOUT; - - } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { - DBG(6, "TX end=%d device not responding\n", epnum); - - /* NOTE: this code path would be a good place to PAUSE a - * transfer, if there's some other (nonperiodic) tx urb - * that could use this fifo. (dma complicates it...) - * - * if (bulk && qh->ring.next != &musb->out_bulk), then - * we have a candidate... NAKing is *NOT* an error - */ - musb_ep_select(mbase, epnum); - musb_writew(epio, MUSB_TXCSR, - MUSB_TXCSR_H_WZC_BITS - | MUSB_TXCSR_TXPKTRDY); - goto finish; - } - - if (status) { - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - dma->status = MUSB_DMA_STATUS_CORE_ABORT; - (void) musb->dma_controller->channel_abort(dma); - } - - /* do the proper sequence to abort the transfer in the - * usb core; the dma engine should already be stopped. - */ - musb_h_tx_flush_fifo(hw_ep); - tx_csr &= ~(MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAENAB - | MUSB_TXCSR_H_ERROR - | MUSB_TXCSR_H_RXSTALL - | MUSB_TXCSR_H_NAKTIMEOUT - ); - - musb_ep_select(mbase, epnum); - musb_writew(epio, MUSB_TXCSR, tx_csr); - /* REVISIT may need to clear FLUSHFIFO ... */ - musb_writew(epio, MUSB_TXCSR, tx_csr); - musb_writeb(epio, MUSB_TXINTERVAL, 0); - - done = true; - } - - /* second cppi case */ - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); - goto finish; - - } - - /* REVISIT this looks wrong... */ - if (!status || dma || usb_pipeisoc(pipe)) { - if (dma) - wLength = dma->actual_len; - else - wLength = qh->segsize; - qh->offset += wLength; - - if (usb_pipeisoc(pipe)) { - struct usb_iso_packet_descriptor *d; - - d = urb->iso_frame_desc + qh->iso_idx; - d->actual_length = qh->segsize; - if (++qh->iso_idx >= urb->number_of_packets) { - done = true; - } else { - d++; - buf = urb->transfer_buffer + d->offset; - wLength = d->length; - } - } else if (dma) { - done = true; - } else { - /* see if we need to send more data, or ZLP */ - if (qh->segsize < qh->maxpacket) - done = true; - else if (qh->offset == urb->transfer_buffer_length - && !(urb->transfer_flags - & URB_ZERO_PACKET)) - done = true; - if (!done) { - buf = urb->transfer_buffer - + qh->offset; - wLength = urb->transfer_buffer_length - - qh->offset; - } - } - } - - /* urb->status != -EINPROGRESS means request has been faulted, - * so we must abort this transfer after cleanup - */ - if (urb->status != -EINPROGRESS) { - done = true; - if (status == 0) - status = urb->status; - } - - if (done) { - /* set status */ - urb->status = status; - urb->actual_length = qh->offset; - musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); - - } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) { - /* WARN_ON(!buf); */ - - /* REVISIT: some docs say that when hw_ep->tx_double_buffered, - * (and presumably, fifo is not half-full) we should write TWO - * packets before updating TXCSR ... other docs disagree ... 
- */ - /* PIO: start next packet in this URB */ - wLength = min(qh->maxpacket, (u16) wLength); - musb_write_fifo(hw_ep, wLength, buf); - qh->segsize = wLength; - - musb_ep_select(mbase, epnum); - musb_writew(epio, MUSB_TXCSR, - MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); - } else - DBG(1, "not complete, but dma enabled?\n"); - -finish: - return; -} - - -#ifdef CONFIG_USB_INVENTRA_DMA - -/* Host side RX (IN) using Mentor DMA works as follows: - submit_urb -> - - if queue was empty, ProgramEndpoint - - first IN token is sent out (by setting ReqPkt) - LinuxIsr -> RxReady() - /\ => first packet is received - | - Set in mode 0 (DmaEnab, ~ReqPkt) - | -> DMA Isr (transfer complete) -> RxReady() - | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) - | - if urb not complete, send next IN token (ReqPkt) - | | else complete urb. - | | - --------------------------- - * - * Nuances of mode 1: - * For short packets, no ack (+RxPktRdy) is sent automatically - * (even if AutoClear is ON) - * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent - * automatically => major problem, as collecting the next packet becomes - * difficult. Hence mode 1 is not used. - * - * REVISIT - * All we care about at this driver level is that - * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; - * (b) termination conditions are: short RX, or buffer full; - * (c) fault modes include - * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. - * (and that endpoint's dma queue stops immediately) - * - overflow (full, PLUS more bytes in the terminal packet) - * - * So for example, usb-storage sets URB_SHORT_NOT_OK, and would - * thus be a great candidate for using mode 1 ... for all but the - * last packet of one URB's transfer. - */ - -#endif - -/* - * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, - * and high-bandwidth IN transfer cases. - */ -void musb_host_rx(struct musb *musb, u8 epnum) -{ - struct urb *urb; - struct musb_hw_ep *hw_ep = musb->endpoints + epnum; - void __iomem *epio = hw_ep->regs; - struct musb_qh *qh = hw_ep->in_qh; - size_t xfer_len; - void __iomem *mbase = musb->mregs; - int pipe; - u16 rx_csr, val; - bool iso_err = false; - bool done = false; - u32 status; - struct dma_channel *dma; - - musb_ep_select(mbase, epnum); - - urb = next_urb(qh); - dma = is_dma_capable() ? hw_ep->rx_channel : NULL; - status = 0; - xfer_len = 0; - - rx_csr = musb_readw(epio, MUSB_RXCSR); - val = rx_csr; - - if (unlikely(!urb)) { - /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least - * usbtest #11 (unlinks) triggers it regularly, sometimes - * with fifo full. (Only with DMA??) - */ - DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, - musb_readw(epio, MUSB_RXCOUNT)); - musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); - return; - } - - pipe = urb->pipe; - - DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", - epnum, rx_csr, urb->actual_length, - dma ? dma->actual_len : 0); - - /* check for errors, concurrent stall & unlink is not really - * handled yet! 
*/ - if (rx_csr & MUSB_RXCSR_H_RXSTALL) { - DBG(3, "RX end %d STALL\n", epnum); - - /* stall; record URB status */ - status = -EPIPE; - - } else if (rx_csr & MUSB_RXCSR_H_ERROR) { - DBG(3, "end %d RX proto error\n", epnum); - - status = -EPROTO; - musb_writeb(epio, MUSB_RXINTERVAL, 0); - - } else if (rx_csr & MUSB_RXCSR_DATAERROR) { - - if (USB_ENDPOINT_XFER_ISOC != qh->type) { - /* NOTE this code path would be a good place to PAUSE a - * transfer, if there's some other (nonperiodic) rx urb - * that could use this fifo. (dma complicates it...) - * - * if (bulk && qh->ring.next != &musb->in_bulk), then - * we have a candidate... NAKing is *NOT* an error - */ - DBG(6, "RX end %d NAK timeout\n", epnum); - musb_ep_select(mbase, epnum); - musb_writew(epio, MUSB_RXCSR, - MUSB_RXCSR_H_WZC_BITS - | MUSB_RXCSR_H_REQPKT); - - goto finish; - } else { - DBG(4, "RX end %d ISO data error\n", epnum); - /* packet error reported later */ - iso_err = true; - } - } - - /* faults abort the transfer */ - if (status) { - /* clean up dma and collect transfer count */ - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - dma->status = MUSB_DMA_STATUS_CORE_ABORT; - (void) musb->dma_controller->channel_abort(dma); - xfer_len = dma->actual_len; - } - musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); - musb_writeb(epio, MUSB_RXINTERVAL, 0); - done = true; - goto finish; - } - - if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { - /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ - ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); - goto finish; - } - - /* thorough shutdown for now ... given more precise fault handling - * and better queueing support, we might keep a DMA pipeline going - * while processing this irq for earlier completions. - */ - - /* FIXME this is _way_ too much in-line logic for Mentor DMA */ - -#ifndef CONFIG_USB_INVENTRA_DMA - if (rx_csr & MUSB_RXCSR_H_REQPKT) { - /* REVISIT this happened for a while on some short reads... - * the cleanup still needs investigation... looks bad... - * and also duplicates dma cleanup code above ... plus, - * shouldn't this be the "half full" double buffer case? - */ - if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { - dma->status = MUSB_DMA_STATUS_CORE_ABORT; - (void) musb->dma_controller->channel_abort(dma); - xfer_len = dma->actual_len; - done = true; - } - - DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, - xfer_len, dma ? ", dma" : ""); - rx_csr &= ~MUSB_RXCSR_H_REQPKT; - - musb_ep_select(mbase, epnum); - musb_writew(epio, MUSB_RXCSR, - MUSB_RXCSR_H_WZC_BITS | rx_csr); - } -#endif - if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { - xfer_len = dma->actual_len; - - val &= ~(MUSB_RXCSR_DMAENAB - | MUSB_RXCSR_H_AUTOREQ - | MUSB_RXCSR_AUTOCLEAR - | MUSB_RXCSR_RXPKTRDY); - musb_writew(hw_ep->regs, MUSB_RXCSR, val); - -#ifdef CONFIG_USB_INVENTRA_DMA - /* done if urb buffer is full or short packet is recd */ - done = (urb->actual_length + xfer_len >= - urb->transfer_buffer_length - || dma->actual_len < qh->maxpacket); - - /* send IN token for next packet, without AUTOREQ */ - if (!done) { - val |= MUSB_RXCSR_H_REQPKT; - musb_writew(epio, MUSB_RXCSR, - MUSB_RXCSR_H_WZC_BITS | val); - } - - DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, - done ? 
"off" : "reset", - musb_readw(epio, MUSB_RXCSR), - musb_readw(epio, MUSB_RXCOUNT)); -#else - done = true; -#endif - } else if (urb->status == -EINPROGRESS) { - /* if no errors, be sure a packet is ready for unloading */ - if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { - status = -EPROTO; - ERR("Rx interrupt with no errors or packet!\n"); - - /* FIXME this is another "SHOULD NEVER HAPPEN" */ - -/* SCRUB (RX) */ - /* do the proper sequence to abort the transfer */ - musb_ep_select(mbase, epnum); - val &= ~MUSB_RXCSR_H_REQPKT; - musb_writew(epio, MUSB_RXCSR, val); - goto finish; - } - - /* we are expecting IN packets */ -#ifdef CONFIG_USB_INVENTRA_DMA - if (dma) { - struct dma_controller *c; - u16 rx_count; - int ret; - - rx_count = musb_readw(epio, MUSB_RXCOUNT); - - DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", - epnum, rx_count, - urb->transfer_dma - + urb->actual_length, - qh->offset, - urb->transfer_buffer_length); - - c = musb->dma_controller; - - dma->desired_mode = 0; -#ifdef USE_MODE1 - /* because of the issue below, mode 1 will - * only rarely behave with correct semantics. - */ - if ((urb->transfer_flags & - URB_SHORT_NOT_OK) - && (urb->transfer_buffer_length - - urb->actual_length) - > qh->maxpacket) - dma->desired_mode = 1; -#endif - -/* Disadvantage of using mode 1: - * It's basically usable only for mass storage class; essentially all - * other protocols also terminate transfers on short packets. - * - * Details: - * An extra IN token is sent at the end of the transfer (due to AUTOREQ) - * If you try to use mode 1 for (transfer_buffer_length - 512), and try - * to use the extra IN token to grab the last packet using mode 0, then - * the problem is that you cannot be sure when the device will send the - * last packet and RxPktRdy set. Sometimes the packet is recd too soon - * such that it gets lost when RxCSR is re-set at the end of the mode 1 - * transfer, while sometimes it is recd just a little late so that if you - * try to configure for mode 0 soon after the mode 1 transfer is - * completed, you will find rxcount 0. Okay, so you might think why not - * wait for an interrupt when the pkt is recd. Well, you won't get any! - */ - - val = musb_readw(epio, MUSB_RXCSR); - val &= ~MUSB_RXCSR_H_REQPKT; - - if (dma->desired_mode == 0) - val &= ~MUSB_RXCSR_H_AUTOREQ; - else - val |= MUSB_RXCSR_H_AUTOREQ; - val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; - - musb_writew(epio, MUSB_RXCSR, - MUSB_RXCSR_H_WZC_BITS | val); - - /* REVISIT if when actual_length != 0, - * transfer_buffer_length needs to be - * adjusted first... - */ - ret = c->channel_program( - dma, qh->maxpacket, - dma->desired_mode, - urb->transfer_dma - + urb->actual_length, - (dma->desired_mode == 0) - ? rx_count - : urb->transfer_buffer_length); - - if (!ret) { - c->channel_release(dma); - hw_ep->rx_channel = NULL; - dma = NULL; - /* REVISIT reset CSR */ - } - } -#endif /* Mentor DMA */ - - if (!dma) { - done = musb_host_packet_rx(musb, urb, - epnum, iso_err); - DBG(6, "read %spacket\n", done ? 
"last " : ""); - } - } - - if (dma && usb_pipeisoc(pipe)) { - struct usb_iso_packet_descriptor *d; - int iso_stat = status; - - d = urb->iso_frame_desc + qh->iso_idx; - d->actual_length += xfer_len; - if (iso_err) { - iso_stat = -EILSEQ; - urb->error_count++; - } - d->status = iso_stat; - } - -finish: - urb->actual_length += xfer_len; - qh->offset += xfer_len; - if (done) { - if (urb->status == -EINPROGRESS) - urb->status = status; - musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); - } -} - -/* schedule nodes correspond to peripheral endpoints, like an OHCI QH. - * the software schedule associates multiple such nodes with a given - * host side hardware endpoint + direction; scheduling may activate - * that hardware endpoint. - */ -static int musb_schedule( - struct musb *musb, - struct musb_qh *qh, - int is_in) -{ - int idle; - int best_diff; - int best_end, epnum; - struct musb_hw_ep *hw_ep = NULL; - struct list_head *head = NULL; - - /* use fixed hardware for control and bulk */ - switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: - head = &musb->control; - hw_ep = musb->control_ep; - break; - case USB_ENDPOINT_XFER_BULK: - hw_ep = musb->bulk_ep; - if (is_in) - head = &musb->in_bulk; - else - head = &musb->out_bulk; - break; - } - if (head) { - idle = list_empty(head); - list_add_tail(&qh->ring, head); - goto success; - } - - /* else, periodic transfers get muxed to other endpoints */ - - /* FIXME this doesn't consider direction, so it can only - * work for one half of the endpoint hardware, and assumes - * the previous cases handled all non-shared endpoints... - */ - - /* we know this qh hasn't been scheduled, so all we need to do - * is choose which hardware endpoint to put it on ... - * - * REVISIT what we really want here is a regular schedule tree - * like e.g. OHCI uses, but for now musb->periodic is just an - * array of the _single_ logical endpoint associated with a - * given physical one (identity mapping logical->physical). - * - * that simplistic approach makes TT scheduling a lot simpler; - * there is none, and thus none of its complexity... - */ - best_diff = 4096; - best_end = -1; - - for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { - int diff; - - if (musb->periodic[epnum]) - continue; - hw_ep = &musb->endpoints[epnum]; - if (hw_ep == musb->bulk_ep) - continue; - - if (is_in) - diff = hw_ep->max_packet_sz_rx - qh->maxpacket; - else - diff = hw_ep->max_packet_sz_tx - qh->maxpacket; - - if (diff > 0 && best_diff > diff) { - best_diff = diff; - best_end = epnum; - } - } - if (best_end < 0) - return -ENOSPC; - - idle = 1; - hw_ep = musb->endpoints + best_end; - musb->periodic[best_end] = qh; - DBG(4, "qh %p periodic slot %d\n", qh, best_end); -success: - qh->hw_ep = hw_ep; - qh->hep->hcpriv = qh; - if (idle) - musb_start_urb(musb, is_in, qh); - return 0; -} - -static int musb_urb_enqueue( - struct usb_hcd *hcd, - struct urb *urb, - gfp_t mem_flags) -{ - unsigned long flags; - struct musb *musb = hcd_to_musb(hcd); - struct usb_host_endpoint *hep = urb->ep; - struct musb_qh *qh = hep->hcpriv; - struct usb_endpoint_descriptor *epd = &hep->desc; - int ret; - unsigned type_reg; - unsigned interval; - - /* host role must be active */ - if (!is_host_active(musb) || !musb->is_active) - return -ENODEV; - - spin_lock_irqsave(&musb->lock, flags); - ret = usb_hcd_link_urb_to_ep(hcd, urb); - spin_unlock_irqrestore(&musb->lock, flags); - if (ret) - return ret; - - /* DMA mapping was already done, if needed, and this urb is on - * hep->urb_list ... 
so there's little to do unless hep wasn't - * yet scheduled onto a live qh. - * - * REVISIT best to keep hep->hcpriv valid until the endpoint gets - * disabled, testing for empty qh->ring and avoiding qh setup costs - * except for the first urb queued after a config change. - */ - if (qh) { - urb->hcpriv = qh; - return 0; - } - - /* Allocate and initialize qh, minimizing the work done each time - * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. - * - * REVISIT consider a dedicated qh kmem_cache, so it's harder - * for bugs in other kernel code to break this driver... - */ - qh = kzalloc(sizeof *qh, mem_flags); - if (!qh) { - usb_hcd_unlink_urb_from_ep(hcd, urb); - return -ENOMEM; - } - - qh->hep = hep; - qh->dev = urb->dev; - INIT_LIST_HEAD(&qh->ring); - qh->is_ready = 1; - - qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); - - /* no high bandwidth support yet */ - if (qh->maxpacket & ~0x7ff) { - ret = -EMSGSIZE; - goto done; - } - - qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; - qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; - - /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ - qh->addr_reg = (u8) usb_pipedevice(urb->pipe); - - /* precompute rxtype/txtype/type0 register */ - type_reg = (qh->type << 4) | qh->epnum; - switch (urb->dev->speed) { - case USB_SPEED_LOW: - type_reg |= 0xc0; - break; - case USB_SPEED_FULL: - type_reg |= 0x80; - break; - default: - type_reg |= 0x40; - } - qh->type_reg = type_reg; - - /* precompute rxinterval/txinterval register */ - interval = min((u8)16, epd->bInterval); /* log encoding */ - switch (qh->type) { - case USB_ENDPOINT_XFER_INT: - /* fullspeed uses linear encoding */ - if (USB_SPEED_FULL == urb->dev->speed) { - interval = epd->bInterval; - if (!interval) - interval = 1; - } - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_ISOC: - /* iso always uses log encoding */ - break; - default: - /* REVISIT we actually want to use NAK limits, hinting to the - * transfer scheduling logic to try some other qh, e.g. try - * for 2 msec first: - * - * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; - * - * The downside of disabling this is that transfer scheduling - * gets VERY unfair for nonperiodic transfers; a misbehaving - * peripheral could make that hurt. Or for reads, one that's - * perfectly normal: network and other drivers keep reads - * posted at all times, having one pending for a week should - * be perfectly safe. - * - * The upside of disabling it is avoidng transfer scheduling - * code to put this aside for while. - */ - interval = 0; - } - qh->intv_reg = interval; - - /* precompute addressing for external hub/tt ports */ - if (musb->is_multipoint) { - struct usb_device *parent = urb->dev->parent; - - if (parent != hcd->self.root_hub) { - qh->h_addr_reg = (u8) parent->devnum; - - /* set up tt info if needed */ - if (urb->dev->tt) { - qh->h_port_reg = (u8) urb->dev->ttport; - qh->h_addr_reg |= 0x80; - } - } - } - - /* invariant: hep->hcpriv is null OR the qh that's already scheduled. - * until we get real dma queues (with an entry for each urb/buffer), - * we only have work to do in the former case. - */ - spin_lock_irqsave(&musb->lock, flags); - if (hep->hcpriv) { - /* some concurrent activity submitted another urb to hep... - * odd, rare, error prone, but legal. 
- */ - kfree(qh); - ret = 0; - } else - ret = musb_schedule(musb, qh, - epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); - - if (ret == 0) { - urb->hcpriv = qh; - /* FIXME set urb->start_frame for iso/intr, it's tested in - * musb_start_urb(), but otherwise only konicawc cares ... - */ - } - spin_unlock_irqrestore(&musb->lock, flags); - -done: - if (ret != 0) { - usb_hcd_unlink_urb_from_ep(hcd, urb); - kfree(qh); - } - return ret; -} - - -/* - * abort a transfer that's at the head of a hardware queue. - * called with controller locked, irqs blocked - * that hardware queue advances to the next transfer, unless prevented - */ -static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) -{ - struct musb_hw_ep *ep = qh->hw_ep; - void __iomem *epio = ep->regs; - unsigned hw_end = ep->epnum; - void __iomem *regs = ep->musb->mregs; - u16 csr; - int status = 0; - - musb_ep_select(regs, hw_end); - - if (is_dma_capable()) { - struct dma_channel *dma; - - dma = is_in ? ep->rx_channel : ep->tx_channel; - if (dma) { - status = ep->musb->dma_controller->channel_abort(dma); - DBG(status ? 1 : 3, - "abort %cX%d DMA for urb %p --> %d\n", - is_in ? 'R' : 'T', ep->epnum, - urb, status); - urb->actual_length += dma->actual_len; - } - } - - /* turn off DMA requests, discard state, stop polling ... */ - if (is_in) { - /* giveback saves bulk toggle */ - csr = musb_h_flush_rxfifo(ep, 0); - - /* REVISIT we still get an irq; should likely clear the - * endpoint's irq status here to avoid bogus irqs. - * clearing that status is platform-specific... - */ - } else { - musb_h_tx_flush_fifo(ep); - csr = musb_readw(epio, MUSB_TXCSR); - csr &= ~(MUSB_TXCSR_AUTOSET - | MUSB_TXCSR_DMAENAB - | MUSB_TXCSR_H_RXSTALL - | MUSB_TXCSR_H_NAKTIMEOUT - | MUSB_TXCSR_H_ERROR - | MUSB_TXCSR_TXPKTRDY); - musb_writew(epio, MUSB_TXCSR, csr); - /* REVISIT may need to clear FLUSHFIFO ... */ - musb_writew(epio, MUSB_TXCSR, csr); - /* flush cpu writebuffer */ - csr = musb_readw(epio, MUSB_TXCSR); - } - if (status == 0) - musb_advance_schedule(ep->musb, urb, ep, is_in); - return status; -} - -static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) -{ - struct musb *musb = hcd_to_musb(hcd); - struct musb_qh *qh; - struct list_head *sched; - unsigned long flags; - int ret; - - DBG(4, "urb=%p, dev%d ep%d%s\n", urb, - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out"); - - spin_lock_irqsave(&musb->lock, flags); - ret = usb_hcd_check_unlink_urb(hcd, urb, status); - if (ret) - goto done; - - qh = urb->hcpriv; - if (!qh) - goto done; - - /* Any URB not actively programmed into endpoint hardware can be - * immediately given back. Such an URB must be at the head of its - * endpoint queue, unless someday we get real DMA queues. And even - * then, it might not be known to the hardware... - * - * Otherwise abort current transfer, pending dma, etc.; urb->status - * has already been updated. This is a synchronous abort; it'd be - * OK to hold off until after some IRQ, though. - */ - if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) - ret = -EINPROGRESS; - else { - switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: - sched = &musb->control; - break; - case USB_ENDPOINT_XFER_BULK: - if (usb_pipein(urb->pipe)) - sched = &musb->in_bulk; - else - sched = &musb->out_bulk; - break; - default: - /* REVISIT when we get a schedule tree, periodic - * transfers won't always be at the head of a - * singleton queue... 
- */ - sched = NULL; - break; - } - } - - /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ - if (ret < 0 || (sched && qh != first_qh(sched))) { - int ready = qh->is_ready; - - ret = 0; - qh->is_ready = 0; - __musb_giveback(musb, urb, 0); - qh->is_ready = ready; - } else - ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); -done: - spin_unlock_irqrestore(&musb->lock, flags); - return ret; -} - -/* disable an endpoint */ -static void -musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) -{ - u8 epnum = hep->desc.bEndpointAddress; - unsigned long flags; - struct musb *musb = hcd_to_musb(hcd); - u8 is_in = epnum & USB_DIR_IN; - struct musb_qh *qh = hep->hcpriv; - struct urb *urb, *tmp; - struct list_head *sched; - - if (!qh) - return; - - spin_lock_irqsave(&musb->lock, flags); - - switch (qh->type) { - case USB_ENDPOINT_XFER_CONTROL: - sched = &musb->control; - break; - case USB_ENDPOINT_XFER_BULK: - if (is_in) - sched = &musb->in_bulk; - else - sched = &musb->out_bulk; - break; - default: - /* REVISIT when we get a schedule tree, periodic transfers - * won't always be at the head of a singleton queue... - */ - sched = NULL; - break; - } - - /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ - - /* kick first urb off the hardware, if needed */ - qh->is_ready = 0; - if (!sched || qh == first_qh(sched)) { - urb = next_urb(qh); - - /* make software (then hardware) stop ASAP */ - if (!urb->unlinked) - urb->status = -ESHUTDOWN; - - /* cleanup */ - musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); - } else - urb = NULL; - - /* then just nuke all the others */ - list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) - musb_giveback(qh, urb, -ESHUTDOWN); - - spin_unlock_irqrestore(&musb->lock, flags); -} - -static int musb_h_get_frame_number(struct usb_hcd *hcd) -{ - struct musb *musb = hcd_to_musb(hcd); - - return musb_readw(musb->mregs, MUSB_FRAME); -} - -static int musb_h_start(struct usb_hcd *hcd) -{ - struct musb *musb = hcd_to_musb(hcd); - - /* NOTE: musb_start() is called when the hub driver turns - * on port power, or when (OTG) peripheral starts. 
- */ - hcd->state = HC_STATE_RUNNING; - musb->port1_status = 0; - return 0; -} - -static void musb_h_stop(struct usb_hcd *hcd) -{ - musb_stop(hcd_to_musb(hcd)); - hcd->state = HC_STATE_HALT; -} - -static int musb_bus_suspend(struct usb_hcd *hcd) -{ - struct musb *musb = hcd_to_musb(hcd); - - if (musb->xceiv.state == OTG_STATE_A_SUSPEND) - return 0; - - if (is_host_active(musb) && musb->is_active) { - WARNING("trying to suspend as %s is_active=%i\n", - otg_state_string(musb), musb->is_active); - return -EBUSY; - } else - return 0; -} - -static int musb_bus_resume(struct usb_hcd *hcd) -{ - /* resuming child port does the work */ - return 0; -} - -const struct hc_driver musb_hc_driver = { - .description = "musb-hcd", - .product_desc = "MUSB HDRC host driver", - .hcd_priv_size = sizeof(struct musb), - .flags = HCD_USB2 | HCD_MEMORY, - - /* not using irq handler or reset hooks from usbcore, since - * those must be shared with peripheral code for OTG configs - */ - - .start = musb_h_start, - .stop = musb_h_stop, - - .get_frame_number = musb_h_get_frame_number, - - .urb_enqueue = musb_urb_enqueue, - .urb_dequeue = musb_urb_dequeue, - .endpoint_disable = musb_h_disable, - - .hub_status_data = musb_hub_status_data, - .hub_control = musb_hub_control, - .bus_suspend = musb_bus_suspend, - .bus_resume = musb_bus_resume, - /* .start_port_reset = NULL, */ - /* .hub_irq_enable = NULL, */ -}; diff --git a/trunk/drivers/usb/musb/musb_host.h b/trunk/drivers/usb/musb/musb_host.h deleted file mode 100644 index 77bcdb9d5b32..000000000000 --- a/trunk/drivers/usb/musb/musb_host.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * MUSB OTG driver host defines - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#ifndef _MUSB_HOST_H -#define _MUSB_HOST_H - -static inline struct usb_hcd *musb_to_hcd(struct musb *musb) -{ - return container_of((void *) musb, struct usb_hcd, hcd_priv); -} - -static inline struct musb *hcd_to_musb(struct usb_hcd *hcd) -{ - return (struct musb *) (hcd->hcd_priv); -} - -/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */ -struct musb_qh { - struct usb_host_endpoint *hep; /* usbcore info */ - struct usb_device *dev; - struct musb_hw_ep *hw_ep; /* current binding */ - - struct list_head ring; /* of musb_qh */ - /* struct musb_qh *next; */ /* for periodic tree */ - - unsigned offset; /* in urb->transfer_buffer */ - unsigned segsize; /* current xfer fragment */ - - u8 type_reg; /* {rx,tx} type register */ - u8 intv_reg; /* {rx,tx} interval register */ - u8 addr_reg; /* device address register */ - u8 h_addr_reg; /* hub address register */ - u8 h_port_reg; /* hub port register */ - - u8 is_ready; /* safe to modify hw_ep */ - u8 type; /* XFERTYPE_* */ - u8 epnum; - u16 maxpacket; - u16 frame; /* for periodic schedule */ - unsigned iso_idx; /* in urb->iso_frame_desc[] */ -}; - -/* map from control or bulk queue head to the first qh on that ring */ -static inline struct musb_qh *first_qh(struct list_head *q) -{ - if (list_empty(q)) - return NULL; - return list_entry(q->next, struct musb_qh, ring); -} - - -extern void musb_root_disconnect(struct musb *musb); - -struct usb_hcd; - -extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf); -extern int musb_hub_control(struct usb_hcd *hcd, - u16 typeReq, u16 wValue, u16 wIndex, - char *buf, u16 wLength); - -extern const struct hc_driver musb_hc_driver; - -static inline struct urb *next_urb(struct musb_qh *qh) -{ -#ifdef CONFIG_USB_MUSB_HDRC_HCD - struct list_head *queue; - - if (!qh) - return NULL; - queue = &qh->hep->urb_list; - if (list_empty(queue)) - return NULL; - return list_entry(queue->next, struct urb, urb_list); -#else - return NULL; -#endif -} - -#endif /* _MUSB_HOST_H */ diff --git a/trunk/drivers/usb/musb/musb_io.h b/trunk/drivers/usb/musb/musb_io.h deleted file mode 100644 index 6bbedae83af8..000000000000 --- a/trunk/drivers/usb/musb/musb_io.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * MUSB OTG driver register I/O - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__ -#define __MUSB_LINUX_PLATFORM_ARCH_H__ - -#include - -#ifndef CONFIG_ARM -static inline void readsl(const void __iomem *addr, void *buf, int len) - { insl((unsigned long)addr, buf, len); } -static inline void readsw(const void __iomem *addr, void *buf, int len) - { insw((unsigned long)addr, buf, len); } -static inline void readsb(const void __iomem *addr, void *buf, int len) - { insb((unsigned long)addr, buf, len); } - -static inline void writesl(const void __iomem *addr, const void *buf, int len) - { outsl((unsigned long)addr, buf, len); } -static inline void writesw(const void __iomem *addr, const void *buf, int len) - { outsw((unsigned long)addr, buf, len); } -static inline void writesb(const void __iomem *addr, const void *buf, int len) - { outsb((unsigned long)addr, buf, len); } - -#endif - -/* NOTE: these offsets are all in bytes */ - -static inline u16 musb_readw(const void __iomem *addr, unsigned offset) - { return __raw_readw(addr + offset); } - -static inline u32 musb_readl(const void __iomem *addr, unsigned offset) - { return __raw_readl(addr + offset); } - - -static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data) - { __raw_writew(data, addr + offset); } - -static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data) - { __raw_writel(data, addr + offset); } - - -#ifdef CONFIG_USB_TUSB6010 - -/* - * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. - */ -static inline u8 musb_readb(const void __iomem *addr, unsigned offset) -{ - u16 tmp; - u8 val; - - tmp = __raw_readw(addr + (offset & ~1)); - if (offset & 1) - val = (tmp >> 8); - else - val = tmp & 0xff; - - return val; -} - -static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) -{ - u16 tmp; - - tmp = __raw_readw(addr + (offset & ~1)); - if (offset & 1) - tmp = (data << 8) | (tmp & 0xff); - else - tmp = (tmp & 0xff00) | data; - - __raw_writew(tmp, addr + (offset & ~1)); -} - -#else - -static inline u8 musb_readb(const void __iomem *addr, unsigned offset) - { return __raw_readb(addr + offset); } - -static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) - { __raw_writeb(data, addr + offset); } - -#endif /* CONFIG_USB_TUSB6010 */ - -#endif diff --git a/trunk/drivers/usb/musb/musb_procfs.c b/trunk/drivers/usb/musb/musb_procfs.c deleted file mode 100644 index 55e6b78bdccc..000000000000 --- a/trunk/drivers/usb/musb/musb_procfs.c +++ /dev/null @@ -1,830 +0,0 @@ -/* - * MUSB OTG driver debug support - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include /* FIXME remove procfs writes */ -#include - -#include "musb_core.h" - -#include "davinci.h" - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - -static int dump_qh(struct musb_qh *qh, char *buf, unsigned max) -{ - int count; - int tmp; - struct usb_host_endpoint *hep = qh->hep; - struct urb *urb; - - count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n", - qh, qh->dev->devnum, qh->epnum, - ({ char *s; switch (qh->type) { - case USB_ENDPOINT_XFER_BULK: - s = "-bulk"; break; - case USB_ENDPOINT_XFER_INT: - s = "-int"; break; - case USB_ENDPOINT_XFER_CONTROL: - s = ""; break; - default: - s = "iso"; break; - }; s; }), - qh->maxpacket); - if (count <= 0) - return 0; - buf += count; - max -= count; - - list_for_each_entry(urb, &hep->urb_list, urb_list) { - tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n", - usb_pipein(urb->pipe) ? "in" : "out", - urb, urb->actual_length, - urb->transfer_buffer_length); - if (tmp <= 0) - break; - tmp = min(tmp, (int)max); - count += tmp; - buf += tmp; - max -= tmp; - } - return count; -} - -static int -dump_queue(struct list_head *q, char *buf, unsigned max) -{ - int count = 0; - struct musb_qh *qh; - - list_for_each_entry(qh, q, ring) { - int tmp; - - tmp = dump_qh(qh, buf, max); - if (tmp <= 0) - break; - tmp = min(tmp, (int)max); - count += tmp; - buf += tmp; - max -= tmp; - } - return count; -} - -#endif /* HCD */ - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC -static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max) -{ - char *buf = buffer; - int code = 0; - void __iomem *regs = ep->hw_ep->regs; - char *mode = "1buf"; - - if (ep->is_in) { - if (ep->hw_ep->tx_double_buffered) - mode = "2buf"; - } else { - if (ep->hw_ep->rx_double_buffered) - mode = "2buf"; - } - - do { - struct usb_request *req; - - code = snprintf(buf, max, - "\n%s (hw%d): %s%s, csr %04x maxp %04x\n", - ep->name, ep->current_epnum, - mode, ep->dma ? " dma" : "", - musb_readw(regs, - (ep->is_in || !ep->current_epnum) - ? MUSB_TXCSR - : MUSB_RXCSR), - musb_readw(regs, ep->is_in - ? 
MUSB_TXMAXP - : MUSB_RXMAXP) - ); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - - if (is_cppi_enabled() && ep->current_epnum) { - unsigned cppi = ep->current_epnum - 1; - void __iomem *base = ep->musb->ctrl_base; - unsigned off1 = cppi << 2; - void __iomem *ram = base; - char tmp[16]; - - if (ep->is_in) { - ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi); - tmp[0] = 0; - } else { - ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi); - snprintf(tmp, sizeof tmp, "%d left, ", - musb_readl(base, - DAVINCI_RXCPPI_BUFCNT0_REG + off1)); - } - - code = snprintf(buf, max, "%cX DMA%d: %s" - "%08x %08x, %08x %08x; " - "%08x %08x %08x .. %08x\n", - ep->is_in ? 'T' : 'R', - ep->current_epnum - 1, tmp, - musb_readl(ram, 0 * 4), - musb_readl(ram, 1 * 4), - musb_readl(ram, 2 * 4), - musb_readl(ram, 3 * 4), - musb_readl(ram, 4 * 4), - musb_readl(ram, 5 * 4), - musb_readl(ram, 6 * 4), - musb_readl(ram, 7 * 4)); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - - if (list_empty(&ep->req_list)) { - code = snprintf(buf, max, "\t(queue empty)\n"); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - break; - } - list_for_each_entry(req, &ep->req_list, list) { - code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n", - req, - req->zero ? "zero, " : "", - req->short_not_ok ? "!short, " : "", - req->actual, req->length); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - } while (0); - return buf - buffer; -} -#endif - -static int -dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max) -{ - int code = 0; - char *buf = aBuffer; - struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; - - do { - musb_ep_select(musb->mregs, epnum); -#ifdef CONFIG_USB_MUSB_HDRC_HCD - if (is_host_active(musb)) { - int dump_rx, dump_tx; - void __iomem *regs = hw_ep->regs; - - /* TEMPORARY (!) until we have a real periodic - * schedule tree ... - */ - if (!epnum) { - /* control is shared, uses RX queue - * but (mostly) shadowed tx registers - */ - dump_tx = !list_empty(&musb->control); - dump_rx = 0; - } else if (hw_ep == musb->bulk_ep) { - dump_tx = !list_empty(&musb->out_bulk); - dump_rx = !list_empty(&musb->in_bulk); - } else if (musb->periodic[epnum]) { - struct usb_host_endpoint *hep; - - hep = musb->periodic[epnum]->hep; - dump_rx = hep->desc.bEndpointAddress - & USB_ENDPOINT_DIR_MASK; - dump_tx = !dump_rx; - } else - break; - /* END TEMPORARY */ - - - if (dump_rx) { - code = snprintf(buf, max, - "\nRX%d: %s rxcsr %04x interval %02x " - "max %04x type %02x; " - "dev %d hub %d port %d" - "\n", - epnum, - hw_ep->rx_double_buffered - ? 
"2buf" : "1buf", - musb_readw(regs, MUSB_RXCSR), - musb_readb(regs, MUSB_RXINTERVAL), - musb_readw(regs, MUSB_RXMAXP), - musb_readb(regs, MUSB_RXTYPE), - /* FIXME: assumes multipoint */ - musb_readb(musb->mregs, - MUSB_BUSCTL_OFFSET(epnum, - MUSB_RXFUNCADDR)), - musb_readb(musb->mregs, - MUSB_BUSCTL_OFFSET(epnum, - MUSB_RXHUBADDR)), - musb_readb(musb->mregs, - MUSB_BUSCTL_OFFSET(epnum, - MUSB_RXHUBPORT)) - ); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - - if (is_cppi_enabled() - && epnum - && hw_ep->rx_channel) { - unsigned cppi = epnum - 1; - unsigned off1 = cppi << 2; - void __iomem *base; - void __iomem *ram; - char tmp[16]; - - base = musb->ctrl_base; - ram = DAVINCI_RXCPPI_STATERAM_OFFSET( - cppi) + base; - snprintf(tmp, sizeof tmp, "%d left, ", - musb_readl(base, - DAVINCI_RXCPPI_BUFCNT0_REG - + off1)); - - code = snprintf(buf, max, - " rx dma%d: %s" - "%08x %08x, %08x %08x; " - "%08x %08x %08x .. %08x\n", - cppi, tmp, - musb_readl(ram, 0 * 4), - musb_readl(ram, 1 * 4), - musb_readl(ram, 2 * 4), - musb_readl(ram, 3 * 4), - musb_readl(ram, 4 * 4), - musb_readl(ram, 5 * 4), - musb_readl(ram, 6 * 4), - musb_readl(ram, 7 * 4)); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - - if (hw_ep == musb->bulk_ep - && !list_empty( - &musb->in_bulk)) { - code = dump_queue(&musb->in_bulk, - buf, max); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } else if (musb->periodic[epnum]) { - code = dump_qh(musb->periodic[epnum], - buf, max); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - } - - if (dump_tx) { - code = snprintf(buf, max, - "\nTX%d: %s txcsr %04x interval %02x " - "max %04x type %02x; " - "dev %d hub %d port %d" - "\n", - epnum, - hw_ep->tx_double_buffered - ? "2buf" : "1buf", - musb_readw(regs, MUSB_TXCSR), - musb_readb(regs, MUSB_TXINTERVAL), - musb_readw(regs, MUSB_TXMAXP), - musb_readb(regs, MUSB_TXTYPE), - /* FIXME: assumes multipoint */ - musb_readb(musb->mregs, - MUSB_BUSCTL_OFFSET(epnum, - MUSB_TXFUNCADDR)), - musb_readb(musb->mregs, - MUSB_BUSCTL_OFFSET(epnum, - MUSB_TXHUBADDR)), - musb_readb(musb->mregs, - MUSB_BUSCTL_OFFSET(epnum, - MUSB_TXHUBPORT)) - ); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - - if (is_cppi_enabled() - && epnum - && hw_ep->tx_channel) { - unsigned cppi = epnum - 1; - void __iomem *base; - void __iomem *ram; - - base = musb->ctrl_base; - ram = DAVINCI_RXCPPI_STATERAM_OFFSET( - cppi) + base; - code = snprintf(buf, max, - " tx dma%d: " - "%08x %08x, %08x %08x; " - "%08x %08x %08x .. 
%08x\n", - cppi, - musb_readl(ram, 0 * 4), - musb_readl(ram, 1 * 4), - musb_readl(ram, 2 * 4), - musb_readl(ram, 3 * 4), - musb_readl(ram, 4 * 4), - musb_readl(ram, 5 * 4), - musb_readl(ram, 6 * 4), - musb_readl(ram, 7 * 4)); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - - if (hw_ep == musb->control_ep - && !list_empty( - &musb->control)) { - code = dump_queue(&musb->control, - buf, max); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } else if (hw_ep == musb->bulk_ep - && !list_empty( - &musb->out_bulk)) { - code = dump_queue(&musb->out_bulk, - buf, max); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } else if (musb->periodic[epnum]) { - code = dump_qh(musb->periodic[epnum], - buf, max); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - } - } -#endif -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - if (is_peripheral_active(musb)) { - code = 0; - - if (hw_ep->ep_in.desc || !epnum) { - code = dump_ep(&hw_ep->ep_in, buf, max); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - if (hw_ep->ep_out.desc) { - code = dump_ep(&hw_ep->ep_out, buf, max); - if (code <= 0) - break; - code = min(code, (int) max); - buf += code; - max -= code; - } - } -#endif - } while (0); - - return buf - aBuffer; -} - -/* Dump the current status and compile options. - * @param musb the device driver instance - * @param buffer where to dump the status; it must be big enough to hold the - * result otherwise "BAD THINGS HAPPENS(TM)". - */ -static int dump_header_stats(struct musb *musb, char *buffer) -{ - int code, count = 0; - const void __iomem *mbase = musb->mregs; - - *buffer = 0; - count = sprintf(buffer, "Status: %sHDRC, Mode=%s " - "(Power=%02x, DevCtl=%02x)\n", - (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb), - musb_readb(mbase, MUSB_POWER), - musb_readb(mbase, MUSB_DEVCTL)); - if (count <= 0) - return 0; - buffer += count; - - code = sprintf(buffer, "OTG state: %s; %sactive\n", - otg_state_string(musb), - musb->is_active ? "" : "in"); - if (code <= 0) - goto done; - buffer += code; - count += code; - - code = sprintf(buffer, - "Options: " -#ifdef CONFIG_MUSB_PIO_ONLY - "pio" -#elif defined(CONFIG_USB_TI_CPPI_DMA) - "cppi-dma" -#elif defined(CONFIG_USB_INVENTRA_DMA) - "musb-dma" -#elif defined(CONFIG_USB_TUSB_OMAP_DMA) - "tusb-omap-dma" -#else - "?dma?" 
-#endif - ", " -#ifdef CONFIG_USB_MUSB_OTG - "otg (peripheral+host)" -#elif defined(CONFIG_USB_GADGET_MUSB_HDRC) - "peripheral" -#elif defined(CONFIG_USB_MUSB_HDRC_HCD) - "host" -#endif - ", debug=%d [eps=%d]\n", - debug, - musb->nr_endpoints); - if (code <= 0) - goto done; - count += code; - buffer += code; - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - code = sprintf(buffer, "Peripheral address: %02x\n", - musb_readb(musb->ctrl_base, MUSB_FADDR)); - if (code <= 0) - goto done; - buffer += code; - count += code; -#endif - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - code = sprintf(buffer, "Root port status: %08x\n", - musb->port1_status); - if (code <= 0) - goto done; - buffer += code; - count += code; -#endif - -#ifdef CONFIG_ARCH_DAVINCI - code = sprintf(buffer, - "DaVinci: ctrl=%02x stat=%1x phy=%03x\n" - "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x" - "\n", - musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG), - musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG), - __raw_readl((void __force __iomem *) - IO_ADDRESS(USBPHY_CTL_PADDR)), - musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG), - musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG), - musb_readl(musb->ctrl_base, - DAVINCI_USB_INT_SOURCE_REG), - musb_readl(musb->ctrl_base, - DAVINCI_USB_INT_MASK_REG)); - if (code <= 0) - goto done; - count += code; - buffer += code; -#endif /* DAVINCI */ - -#ifdef CONFIG_USB_TUSB6010 - code = sprintf(buffer, - "TUSB6010: devconf %08x, phy enable %08x drive %08x" - "\n\totg %03x timer %08x" - "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x" - "\n", - musb_readl(musb->ctrl_base, TUSB_DEV_CONF), - musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE), - musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL), - musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT), - musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER), - musb_readl(musb->ctrl_base, TUSB_PRCM_CONF), - musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT), - musb_readl(musb->ctrl_base, TUSB_INT_SRC), - musb_readl(musb->ctrl_base, TUSB_INT_MASK)); - if (code <= 0) - goto done; - count += code; - buffer += code; -#endif /* DAVINCI */ - - if (is_cppi_enabled() && musb->dma_controller) { - code = sprintf(buffer, - "CPPI: txcr=%d txsrc=%01x txena=%01x; " - "rxcr=%d rxsrc=%01x rxena=%01x " - "\n", - musb_readl(musb->ctrl_base, - DAVINCI_TXCPPI_CTRL_REG), - musb_readl(musb->ctrl_base, - DAVINCI_TXCPPI_RAW_REG), - musb_readl(musb->ctrl_base, - DAVINCI_TXCPPI_INTENAB_REG), - musb_readl(musb->ctrl_base, - DAVINCI_RXCPPI_CTRL_REG), - musb_readl(musb->ctrl_base, - DAVINCI_RXCPPI_RAW_REG), - musb_readl(musb->ctrl_base, - DAVINCI_RXCPPI_INTENAB_REG)); - if (code <= 0) - goto done; - count += code; - buffer += code; - } - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - if (is_peripheral_enabled(musb)) { - code = sprintf(buffer, "Gadget driver: %s\n", - musb->gadget_driver - ? 
musb->gadget_driver->driver.name - : "(none)"); - if (code <= 0) - goto done; - count += code; - buffer += code; - } -#endif - -done: - return count; -} - -/* Write to ProcFS - * - * C soft-connect - * c soft-disconnect - * I enable HS - * i disable HS - * s stop session - * F force session (OTG-unfriendly) - * E rElinquish bus (OTG) - * H request host mode - * h cancel host request - * T start sending TEST_PACKET - * D set/query the debug level - */ -static int musb_proc_write(struct file *file, const char __user *buffer, - unsigned long count, void *data) -{ - char cmd; - u8 reg; - struct musb *musb = (struct musb *)data; - void __iomem *mbase = musb->mregs; - - /* MOD_INC_USE_COUNT; */ - - if (unlikely(copy_from_user(&cmd, buffer, 1))) - return -EFAULT; - - switch (cmd) { - case 'C': - if (mbase) { - reg = musb_readb(mbase, MUSB_POWER) - | MUSB_POWER_SOFTCONN; - musb_writeb(mbase, MUSB_POWER, reg); - } - break; - - case 'c': - if (mbase) { - reg = musb_readb(mbase, MUSB_POWER) - & ~MUSB_POWER_SOFTCONN; - musb_writeb(mbase, MUSB_POWER, reg); - } - break; - - case 'I': - if (mbase) { - reg = musb_readb(mbase, MUSB_POWER) - | MUSB_POWER_HSENAB; - musb_writeb(mbase, MUSB_POWER, reg); - } - break; - - case 'i': - if (mbase) { - reg = musb_readb(mbase, MUSB_POWER) - & ~MUSB_POWER_HSENAB; - musb_writeb(mbase, MUSB_POWER, reg); - } - break; - - case 'F': - reg = musb_readb(mbase, MUSB_DEVCTL); - reg |= MUSB_DEVCTL_SESSION; - musb_writeb(mbase, MUSB_DEVCTL, reg); - break; - - case 'H': - if (mbase) { - reg = musb_readb(mbase, MUSB_DEVCTL); - reg |= MUSB_DEVCTL_HR; - musb_writeb(mbase, MUSB_DEVCTL, reg); - /* MUSB_HST_MODE( ((struct musb*)data) ); */ - /* WARNING("Host Mode\n"); */ - } - break; - - case 'h': - if (mbase) { - reg = musb_readb(mbase, MUSB_DEVCTL); - reg &= ~MUSB_DEVCTL_HR; - musb_writeb(mbase, MUSB_DEVCTL, reg); - } - break; - - case 'T': - if (mbase) { - musb_load_testpacket(musb); - musb_writeb(mbase, MUSB_TESTMODE, - MUSB_TEST_PACKET); - } - break; - -#if (MUSB_DEBUG > 0) - /* set/read debug level */ - case 'D':{ - if (count > 1) { - char digits[8], *p = digits; - int i = 0, level = 0, sign = 1; - int len = min(count - 1, (unsigned long)8); - - if (copy_from_user(&digits, &buffer[1], len)) - return -EFAULT; - - /* optional sign */ - if (*p == '-') { - len -= 1; - sign = -sign; - p++; - } - - /* read it */ - while (i++ < len && *p > '0' && *p < '9') { - level = level * 10 + (*p - '0'); - p++; - } - - level *= sign; - DBG(1, "debug level %d\n", level); - debug = level; - } - } - break; - - - case '?': - INFO("?: you are seeing it\n"); - INFO("C/c: soft connect enable/disable\n"); - INFO("I/i: hispeed enable/disable\n"); - INFO("F: force session start\n"); - INFO("H: host mode\n"); - INFO("T: start sending TEST_PACKET\n"); - INFO("D: set/read dbug level\n"); - break; -#endif - - default: - ERR("Command %c not implemented\n", cmd); - break; - } - - musb_platform_try_idle(musb, 0); - - return count; -} - -static int musb_proc_read(char *page, char **start, - off_t off, int count, int *eof, void *data) -{ - char *buffer = page; - int code = 0; - unsigned long flags; - struct musb *musb = data; - unsigned epnum; - - count -= off; - count -= 1; /* for NUL at end */ - if (count <= 0) - return -EINVAL; - - spin_lock_irqsave(&musb->lock, flags); - - code = dump_header_stats(musb, buffer); - if (code > 0) { - buffer += code; - count -= code; - } - - /* generate the report for the end points */ - /* REVISIT ... not unless something's connected! 
*/ - for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints; - epnum++) { - code = dump_end_info(musb, epnum, buffer, count); - if (code > 0) { - buffer += code; - count -= code; - } - } - - musb_platform_try_idle(musb, 0); - - spin_unlock_irqrestore(&musb->lock, flags); - *eof = 1; - - return buffer - page; -} - -void __devexit musb_debug_delete(char *name, struct musb *musb) -{ - if (musb->proc_entry) - remove_proc_entry(name, NULL); -} - -struct proc_dir_entry *__init -musb_debug_create(char *name, struct musb *data) -{ - struct proc_dir_entry *pde; - - /* FIXME convert everything to seq_file; then later, debugfs */ - - if (!name) - return NULL; - - pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL); - data->proc_entry = pde; - if (pde) { - pde->data = data; - /* pde->owner = THIS_MODULE; */ - - pde->read_proc = musb_proc_read; - pde->write_proc = musb_proc_write; - - pde->size = 0; - - pr_debug("Registered /proc/%s\n", name); - } else { - pr_debug("Cannot create a valid proc file entry"); - } - - return pde; -} diff --git a/trunk/drivers/usb/musb/musb_regs.h b/trunk/drivers/usb/musb/musb_regs.h deleted file mode 100644 index 9c228661aa5a..000000000000 --- a/trunk/drivers/usb/musb/musb_regs.h +++ /dev/null @@ -1,300 +0,0 @@ -/* - * MUSB OTG driver register defines - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#ifndef __MUSB_REGS_H__ -#define __MUSB_REGS_H__ - -#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */ - -/* - * Common USB registers - */ - -#define MUSB_FADDR 0x00 /* 8-bit */ -#define MUSB_POWER 0x01 /* 8-bit */ - -#define MUSB_INTRTX 0x02 /* 16-bit */ -#define MUSB_INTRRX 0x04 -#define MUSB_INTRTXE 0x06 -#define MUSB_INTRRXE 0x08 -#define MUSB_INTRUSB 0x0A /* 8 bit */ -#define MUSB_INTRUSBE 0x0B /* 8 bit */ -#define MUSB_FRAME 0x0C -#define MUSB_INDEX 0x0E /* 8 bit */ -#define MUSB_TESTMODE 0x0F /* 8 bit */ - -/* Get offset for a given FIFO from musb->mregs */ -#ifdef CONFIG_USB_TUSB6010 -#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) -#else -#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) -#endif - -/* - * Additional Control Registers - */ - -#define MUSB_DEVCTL 0x60 /* 8 bit */ - -/* These are always controlled through the INDEX register */ -#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */ -#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */ -#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */ -#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */ - -/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ -#define MUSB_HWVERS 0x6C /* 8 bit */ - -#define MUSB_EPINFO 0x78 /* 8 bit */ -#define MUSB_RAMINFO 0x79 /* 8 bit */ -#define MUSB_LINKINFO 0x7a /* 8 bit */ -#define MUSB_VPLEN 0x7b /* 8 bit */ -#define MUSB_HS_EOF1 0x7c /* 8 bit */ -#define MUSB_FS_EOF1 0x7d /* 8 bit */ -#define MUSB_LS_EOF1 0x7e /* 8 bit */ - -/* Offsets to endpoint registers */ -#define MUSB_TXMAXP 0x00 -#define MUSB_TXCSR 0x02 -#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */ -#define MUSB_RXMAXP 0x04 -#define MUSB_RXCSR 0x06 -#define MUSB_RXCOUNT 0x08 -#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */ -#define MUSB_TXTYPE 0x0A -#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */ -#define MUSB_TXINTERVAL 0x0B -#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */ -#define MUSB_RXTYPE 0x0C -#define MUSB_RXINTERVAL 0x0D -#define MUSB_FIFOSIZE 0x0F -#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */ - -/* Offsets to endpoint registers in indexed model (using INDEX register) */ -#define MUSB_INDEXED_OFFSET(_epnum, _offset) \ - (0x10 + (_offset)) - -/* Offsets to endpoint registers in flat models */ -#define MUSB_FLAT_OFFSET(_epnum, _offset) \ - (0x100 + (0x10*(_epnum)) + (_offset)) - -#ifdef CONFIG_USB_TUSB6010 -/* TUSB6010 EP0 configuration register is special */ -#define MUSB_TUSB_OFFSET(_epnum, _offset) \ - (0x10 + _offset) -#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */ -#endif - -/* "bus control"/target registers, for host side multipoint (external hubs) */ -#define MUSB_TXFUNCADDR 0x00 -#define MUSB_TXHUBADDR 0x02 -#define MUSB_TXHUBPORT 0x03 - -#define MUSB_RXFUNCADDR 0x04 -#define MUSB_RXHUBADDR 0x06 -#define MUSB_RXHUBPORT 0x07 - -#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \ - (0x80 + (8*(_epnum)) + (_offset)) - -/* - * MUSB Register bits - */ - -/* POWER */ -#define MUSB_POWER_ISOUPDATE 0x80 -#define MUSB_POWER_SOFTCONN 0x40 -#define MUSB_POWER_HSENAB 0x20 -#define MUSB_POWER_HSMODE 0x10 -#define MUSB_POWER_RESET 0x08 -#define MUSB_POWER_RESUME 0x04 -#define MUSB_POWER_SUSPENDM 0x02 -#define MUSB_POWER_ENSUSPEND 0x01 - -/* INTRUSB */ -#define MUSB_INTR_SUSPEND 0x01 -#define MUSB_INTR_RESUME 0x02 -#define MUSB_INTR_RESET 0x04 -#define MUSB_INTR_BABBLE 0x04 -#define MUSB_INTR_SOF 0x08 -#define MUSB_INTR_CONNECT 0x10 -#define MUSB_INTR_DISCONNECT 0x20 -#define MUSB_INTR_SESSREQ 0x40 -#define 
MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */ - -/* DEVCTL */ -#define MUSB_DEVCTL_BDEVICE 0x80 -#define MUSB_DEVCTL_FSDEV 0x40 -#define MUSB_DEVCTL_LSDEV 0x20 -#define MUSB_DEVCTL_VBUS 0x18 -#define MUSB_DEVCTL_VBUS_SHIFT 3 -#define MUSB_DEVCTL_HM 0x04 -#define MUSB_DEVCTL_HR 0x02 -#define MUSB_DEVCTL_SESSION 0x01 - -/* TESTMODE */ -#define MUSB_TEST_FORCE_HOST 0x80 -#define MUSB_TEST_FIFO_ACCESS 0x40 -#define MUSB_TEST_FORCE_FS 0x20 -#define MUSB_TEST_FORCE_HS 0x10 -#define MUSB_TEST_PACKET 0x08 -#define MUSB_TEST_K 0x04 -#define MUSB_TEST_J 0x02 -#define MUSB_TEST_SE0_NAK 0x01 - -/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */ -#define MUSB_FIFOSZ_DPB 0x10 -/* Allocation size (8, 16, 32, ... 4096) */ -#define MUSB_FIFOSZ_SIZE 0x0f - -/* CSR0 */ -#define MUSB_CSR0_FLUSHFIFO 0x0100 -#define MUSB_CSR0_TXPKTRDY 0x0002 -#define MUSB_CSR0_RXPKTRDY 0x0001 - -/* CSR0 in Peripheral mode */ -#define MUSB_CSR0_P_SVDSETUPEND 0x0080 -#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040 -#define MUSB_CSR0_P_SENDSTALL 0x0020 -#define MUSB_CSR0_P_SETUPEND 0x0010 -#define MUSB_CSR0_P_DATAEND 0x0008 -#define MUSB_CSR0_P_SENTSTALL 0x0004 - -/* CSR0 in Host mode */ -#define MUSB_CSR0_H_DIS_PING 0x0800 -#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */ -#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */ -#define MUSB_CSR0_H_NAKTIMEOUT 0x0080 -#define MUSB_CSR0_H_STATUSPKT 0x0040 -#define MUSB_CSR0_H_REQPKT 0x0020 -#define MUSB_CSR0_H_ERROR 0x0010 -#define MUSB_CSR0_H_SETUPPKT 0x0008 -#define MUSB_CSR0_H_RXSTALL 0x0004 - -/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */ -#define MUSB_CSR0_P_WZC_BITS \ - (MUSB_CSR0_P_SENTSTALL) -#define MUSB_CSR0_H_WZC_BITS \ - (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \ - | MUSB_CSR0_RXPKTRDY) - -/* TxType/RxType */ -#define MUSB_TYPE_SPEED 0xc0 -#define MUSB_TYPE_SPEED_SHIFT 6 -#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */ -#define MUSB_TYPE_PROTO_SHIFT 4 -#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */ - -/* CONFIGDATA */ -#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */ -#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */ -#define MUSB_CONFIGDATA_BIGENDIAN 0x20 -#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ -#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ -#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */ -#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ -#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */ - -/* TXCSR in Peripheral and Host mode */ -#define MUSB_TXCSR_AUTOSET 0x8000 -#define MUSB_TXCSR_MODE 0x2000 -#define MUSB_TXCSR_DMAENAB 0x1000 -#define MUSB_TXCSR_FRCDATATOG 0x0800 -#define MUSB_TXCSR_DMAMODE 0x0400 -#define MUSB_TXCSR_CLRDATATOG 0x0040 -#define MUSB_TXCSR_FLUSHFIFO 0x0008 -#define MUSB_TXCSR_FIFONOTEMPTY 0x0002 -#define MUSB_TXCSR_TXPKTRDY 0x0001 - -/* TXCSR in Peripheral mode */ -#define MUSB_TXCSR_P_ISO 0x4000 -#define MUSB_TXCSR_P_INCOMPTX 0x0080 -#define MUSB_TXCSR_P_SENTSTALL 0x0020 -#define MUSB_TXCSR_P_SENDSTALL 0x0010 -#define MUSB_TXCSR_P_UNDERRUN 0x0004 - -/* TXCSR in Host mode */ -#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200 -#define MUSB_TXCSR_H_DATATOGGLE 0x0100 -#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080 -#define MUSB_TXCSR_H_RXSTALL 0x0020 -#define MUSB_TXCSR_H_ERROR 0x0004 - -/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ -#define MUSB_TXCSR_P_WZC_BITS \ - (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \ - | MUSB_TXCSR_P_UNDERRUN | 
MUSB_TXCSR_FIFONOTEMPTY) -#define MUSB_TXCSR_H_WZC_BITS \ - (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \ - | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY) - -/* RXCSR in Peripheral and Host mode */ -#define MUSB_RXCSR_AUTOCLEAR 0x8000 -#define MUSB_RXCSR_DMAENAB 0x2000 -#define MUSB_RXCSR_DISNYET 0x1000 -#define MUSB_RXCSR_PID_ERR 0x1000 -#define MUSB_RXCSR_DMAMODE 0x0800 -#define MUSB_RXCSR_INCOMPRX 0x0100 -#define MUSB_RXCSR_CLRDATATOG 0x0080 -#define MUSB_RXCSR_FLUSHFIFO 0x0010 -#define MUSB_RXCSR_DATAERROR 0x0008 -#define MUSB_RXCSR_FIFOFULL 0x0002 -#define MUSB_RXCSR_RXPKTRDY 0x0001 - -/* RXCSR in Peripheral mode */ -#define MUSB_RXCSR_P_ISO 0x4000 -#define MUSB_RXCSR_P_SENTSTALL 0x0040 -#define MUSB_RXCSR_P_SENDSTALL 0x0020 -#define MUSB_RXCSR_P_OVERRUN 0x0004 - -/* RXCSR in Host mode */ -#define MUSB_RXCSR_H_AUTOREQ 0x4000 -#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400 -#define MUSB_RXCSR_H_DATATOGGLE 0x0200 -#define MUSB_RXCSR_H_RXSTALL 0x0040 -#define MUSB_RXCSR_H_REQPKT 0x0020 -#define MUSB_RXCSR_H_ERROR 0x0004 - -/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ -#define MUSB_RXCSR_P_WZC_BITS \ - (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \ - | MUSB_RXCSR_RXPKTRDY) -#define MUSB_RXCSR_H_WZC_BITS \ - (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \ - | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY) - -/* HUBADDR */ -#define MUSB_HUBADDR_MULTI_TT 0x80 - -#endif /* __MUSB_REGS_H__ */ diff --git a/trunk/drivers/usb/musb/musb_virthub.c b/trunk/drivers/usb/musb/musb_virthub.c deleted file mode 100644 index e0e9ce584175..000000000000 --- a/trunk/drivers/usb/musb/musb_virthub.c +++ /dev/null @@ -1,425 +0,0 @@ -/* - * MUSB OTG driver virtual root hub support - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2006 by Texas Instruments - * Copyright (C) 2006-2007 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "musb_core.h" - - -static void musb_port_suspend(struct musb *musb, bool do_suspend) -{ - u8 power; - void __iomem *mbase = musb->mregs; - - if (!is_host_active(musb)) - return; - - /* NOTE: this doesn't necessarily put PHY into low power mode, - * turning off its clock; that's a function of PHY integration and - * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect - * SE0 changing to connect (J) or wakeup (K) states. - */ - power = musb_readb(mbase, MUSB_POWER); - if (do_suspend) { - int retries = 10000; - - power &= ~MUSB_POWER_RESUME; - power |= MUSB_POWER_SUSPENDM; - musb_writeb(mbase, MUSB_POWER, power); - - /* Needed for OPT A tests */ - power = musb_readb(mbase, MUSB_POWER); - while (power & MUSB_POWER_SUSPENDM) { - power = musb_readb(mbase, MUSB_POWER); - if (retries-- < 1) - break; - } - - DBG(3, "Root port suspended, power %02x\n", power); - - musb->port1_status |= USB_PORT_STAT_SUSPEND; - switch (musb->xceiv.state) { - case OTG_STATE_A_HOST: - musb->xceiv.state = OTG_STATE_A_SUSPEND; - musb->is_active = is_otg_enabled(musb) - && musb->xceiv.host->b_hnp_enable; - musb_platform_try_idle(musb, 0); - break; -#ifdef CONFIG_USB_MUSB_OTG - case OTG_STATE_B_HOST: - musb->xceiv.state = OTG_STATE_B_WAIT_ACON; - musb->is_active = is_otg_enabled(musb) - && musb->xceiv.host->b_hnp_enable; - musb_platform_try_idle(musb, 0); - break; -#endif - default: - DBG(1, "bogus rh suspend? %s\n", - otg_state_string(musb)); - } - } else if (power & MUSB_POWER_SUSPENDM) { - power &= ~MUSB_POWER_SUSPENDM; - power |= MUSB_POWER_RESUME; - musb_writeb(mbase, MUSB_POWER, power); - - DBG(3, "Root port resuming, power %02x\n", power); - - /* later, GetPortStatus will stop RESUME signaling */ - musb->port1_status |= MUSB_PORT_STAT_RESUME; - musb->rh_timer = jiffies + msecs_to_jiffies(20); - } -} - -static void musb_port_reset(struct musb *musb, bool do_reset) -{ - u8 power; - void __iomem *mbase = musb->mregs; - -#ifdef CONFIG_USB_MUSB_OTG - if (musb->xceiv.state == OTG_STATE_B_IDLE) { - DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n"); - musb->port1_status &= ~USB_PORT_STAT_RESET; - return; - } -#endif - - if (!is_host_active(musb)) - return; - - /* NOTE: caller guarantees it will turn off the reset when - * the appropriate amount of time has passed - */ - power = musb_readb(mbase, MUSB_POWER); - if (do_reset) { - - /* - * If RESUME is set, we must make sure it stays minimum 20 ms. - * Then we must clear RESUME and wait a bit to let musb start - * generating SOFs. If we don't do this, OPT HS A 6.8 tests - * fail with "Error! Did not receive an SOF before suspend - * detected". 
- */ - if (power & MUSB_POWER_RESUME) { - while (time_before(jiffies, musb->rh_timer)) - msleep(1); - musb_writeb(mbase, MUSB_POWER, - power & ~MUSB_POWER_RESUME); - msleep(1); - } - - musb->ignore_disconnect = true; - power &= 0xf0; - musb_writeb(mbase, MUSB_POWER, - power | MUSB_POWER_RESET); - - musb->port1_status |= USB_PORT_STAT_RESET; - musb->port1_status &= ~USB_PORT_STAT_ENABLE; - musb->rh_timer = jiffies + msecs_to_jiffies(50); - } else { - DBG(4, "root port reset stopped\n"); - musb_writeb(mbase, MUSB_POWER, - power & ~MUSB_POWER_RESET); - - musb->ignore_disconnect = false; - - power = musb_readb(mbase, MUSB_POWER); - if (power & MUSB_POWER_HSMODE) { - DBG(4, "high-speed device connected\n"); - musb->port1_status |= USB_PORT_STAT_HIGH_SPEED; - } - - musb->port1_status &= ~USB_PORT_STAT_RESET; - musb->port1_status |= USB_PORT_STAT_ENABLE - | (USB_PORT_STAT_C_RESET << 16) - | (USB_PORT_STAT_C_ENABLE << 16); - usb_hcd_poll_rh_status(musb_to_hcd(musb)); - - musb->vbuserr_retry = VBUSERR_RETRY_COUNT; - } -} - -void musb_root_disconnect(struct musb *musb) -{ - musb->port1_status = (1 << USB_PORT_FEAT_POWER) - | (1 << USB_PORT_FEAT_C_CONNECTION); - - usb_hcd_poll_rh_status(musb_to_hcd(musb)); - musb->is_active = 0; - - switch (musb->xceiv.state) { - case OTG_STATE_A_HOST: - case OTG_STATE_A_SUSPEND: - musb->xceiv.state = OTG_STATE_A_WAIT_BCON; - musb->is_active = 0; - break; - case OTG_STATE_A_WAIT_VFALL: - musb->xceiv.state = OTG_STATE_B_IDLE; - break; - default: - DBG(1, "host disconnect (%s)\n", otg_state_string(musb)); - } -} - - -/*---------------------------------------------------------------------*/ - -/* Caller may or may not hold musb->lock */ -int musb_hub_status_data(struct usb_hcd *hcd, char *buf) -{ - struct musb *musb = hcd_to_musb(hcd); - int retval = 0; - - /* called in_irq() via usb_hcd_poll_rh_status() */ - if (musb->port1_status & 0xffff0000) { - *buf = 0x02; - retval = 1; - } - return retval; -} - -int musb_hub_control( - struct usb_hcd *hcd, - u16 typeReq, - u16 wValue, - u16 wIndex, - char *buf, - u16 wLength) -{ - struct musb *musb = hcd_to_musb(hcd); - u32 temp; - int retval = 0; - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); - - if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) { - spin_unlock_irqrestore(&musb->lock, flags); - return -ESHUTDOWN; - } - - /* hub features: always zero, setting is a NOP - * port features: reported, sometimes updated when host is active - * no indicators - */ - switch (typeReq) { - case ClearHubFeature: - case SetHubFeature: - switch (wValue) { - case C_HUB_OVER_CURRENT: - case C_HUB_LOCAL_POWER: - break; - default: - goto error; - } - break; - case ClearPortFeature: - if ((wIndex & 0xff) != 1) - goto error; - - switch (wValue) { - case USB_PORT_FEAT_ENABLE: - break; - case USB_PORT_FEAT_SUSPEND: - musb_port_suspend(musb, false); - break; - case USB_PORT_FEAT_POWER: - if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) - musb_set_vbus(musb, 0); - break; - case USB_PORT_FEAT_C_CONNECTION: - case USB_PORT_FEAT_C_ENABLE: - case USB_PORT_FEAT_C_OVER_CURRENT: - case USB_PORT_FEAT_C_RESET: - case USB_PORT_FEAT_C_SUSPEND: - break; - default: - goto error; - } - DBG(5, "clear feature %d\n", wValue); - musb->port1_status &= ~(1 << wValue); - break; - case GetHubDescriptor: - { - struct usb_hub_descriptor *desc = (void *)buf; - - desc->bDescLength = 9; - desc->bDescriptorType = 0x29; - desc->bNbrPorts = 1; - desc->wHubCharacteristics = __constant_cpu_to_le16( - 0x0001 /* per-port power switching */ - | 0x0010 /* 
no overcurrent reporting */ - ); - desc->bPwrOn2PwrGood = 5; /* msec/2 */ - desc->bHubContrCurrent = 0; - - /* workaround bogus struct definition */ - desc->DeviceRemovable[0] = 0x02; /* port 1 */ - desc->DeviceRemovable[1] = 0xff; - } - break; - case GetHubStatus: - temp = 0; - *(__le32 *) buf = cpu_to_le32(temp); - break; - case GetPortStatus: - if (wIndex != 1) - goto error; - - /* finish RESET signaling? */ - if ((musb->port1_status & USB_PORT_STAT_RESET) - && time_after_eq(jiffies, musb->rh_timer)) - musb_port_reset(musb, false); - - /* finish RESUME signaling? */ - if ((musb->port1_status & MUSB_PORT_STAT_RESUME) - && time_after_eq(jiffies, musb->rh_timer)) { - u8 power; - - power = musb_readb(musb->mregs, MUSB_POWER); - power &= ~MUSB_POWER_RESUME; - DBG(4, "root port resume stopped, power %02x\n", - power); - musb_writeb(musb->mregs, MUSB_POWER, power); - - /* ISSUE: DaVinci (RTL 1.300) disconnects after - * resume of high speed peripherals (but not full - * speed ones). - */ - - musb->is_active = 1; - musb->port1_status &= ~(USB_PORT_STAT_SUSPEND - | MUSB_PORT_STAT_RESUME); - musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; - usb_hcd_poll_rh_status(musb_to_hcd(musb)); - /* NOTE: it might really be A_WAIT_BCON ... */ - musb->xceiv.state = OTG_STATE_A_HOST; - } - - put_unaligned(cpu_to_le32(musb->port1_status - & ~MUSB_PORT_STAT_RESUME), - (__le32 *) buf); - - /* port change status is more interesting */ - DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n", - musb->port1_status); - break; - case SetPortFeature: - if ((wIndex & 0xff) != 1) - goto error; - - switch (wValue) { - case USB_PORT_FEAT_POWER: - /* NOTE: this controller has a strange state machine - * that involves "requesting sessions" according to - * magic side effects from incompletely-described - * rules about startup... - * - * This call is what really starts the host mode; be - * very careful about side effects if you reorder any - * initialization logic, e.g. for OTG, or change any - * logic relating to VBUS power-up. 
- */ - if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) - musb_start(musb); - break; - case USB_PORT_FEAT_RESET: - musb_port_reset(musb, true); - break; - case USB_PORT_FEAT_SUSPEND: - musb_port_suspend(musb, true); - break; - case USB_PORT_FEAT_TEST: - if (unlikely(is_host_active(musb))) - goto error; - - wIndex >>= 8; - switch (wIndex) { - case 1: - pr_debug("TEST_J\n"); - temp = MUSB_TEST_J; - break; - case 2: - pr_debug("TEST_K\n"); - temp = MUSB_TEST_K; - break; - case 3: - pr_debug("TEST_SE0_NAK\n"); - temp = MUSB_TEST_SE0_NAK; - break; - case 4: - pr_debug("TEST_PACKET\n"); - temp = MUSB_TEST_PACKET; - musb_load_testpacket(musb); - break; - case 5: - pr_debug("TEST_FORCE_ENABLE\n"); - temp = MUSB_TEST_FORCE_HOST - | MUSB_TEST_FORCE_HS; - - musb_writeb(musb->mregs, MUSB_DEVCTL, - MUSB_DEVCTL_SESSION); - break; - case 6: - pr_debug("TEST_FIFO_ACCESS\n"); - temp = MUSB_TEST_FIFO_ACCESS; - break; - default: - goto error; - } - musb_writeb(musb->mregs, MUSB_TESTMODE, temp); - break; - default: - goto error; - } - DBG(5, "set feature %d\n", wValue); - musb->port1_status |= 1 << wValue; - break; - - default: -error: - /* "protocol stall" on error */ - retval = -EPIPE; - } - spin_unlock_irqrestore(&musb->lock, flags); - return retval; -} diff --git a/trunk/drivers/usb/musb/musbhsdma.c b/trunk/drivers/usb/musb/musbhsdma.c deleted file mode 100644 index 9ba8fb7fcd24..000000000000 --- a/trunk/drivers/usb/musb/musbhsdma.c +++ /dev/null @@ -1,433 +0,0 @@ -/* - * MUSB OTG driver - support for Mentor's DMA controller - * - * Copyright 2005 Mentor Graphics Corporation - * Copyright (C) 2005-2007 by Texas Instruments - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ -#include -#include -#include -#include "musb_core.h" - -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) -#include "omap2430.h" -#endif - -#define MUSB_HSDMA_BASE 0x200 -#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0) -#define MUSB_HSDMA_CONTROL 0x4 -#define MUSB_HSDMA_ADDRESS 0x8 -#define MUSB_HSDMA_COUNT 0xc - -#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \ - (MUSB_HSDMA_BASE + (_bChannel << 4) + _offset) - -/* control register (16-bit): */ -#define MUSB_HSDMA_ENABLE_SHIFT 0 -#define MUSB_HSDMA_TRANSMIT_SHIFT 1 -#define MUSB_HSDMA_MODE1_SHIFT 2 -#define MUSB_HSDMA_IRQENABLE_SHIFT 3 -#define MUSB_HSDMA_ENDPOINT_SHIFT 4 -#define MUSB_HSDMA_BUSERROR_SHIFT 8 -#define MUSB_HSDMA_BURSTMODE_SHIFT 9 -#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT) -#define MUSB_HSDMA_BURSTMODE_UNSPEC 0 -#define MUSB_HSDMA_BURSTMODE_INCR4 1 -#define MUSB_HSDMA_BURSTMODE_INCR8 2 -#define MUSB_HSDMA_BURSTMODE_INCR16 3 - -#define MUSB_HSDMA_CHANNELS 8 - -struct musb_dma_controller; - -struct musb_dma_channel { - struct dma_channel Channel; - struct musb_dma_controller *controller; - u32 dwStartAddress; - u32 len; - u16 wMaxPacketSize; - u8 bIndex; - u8 epnum; - u8 transmit; -}; - -struct musb_dma_controller { - struct dma_controller Controller; - struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS]; - void *pDmaPrivate; - void __iomem *pCoreBase; - u8 bChannelCount; - u8 bmUsedChannels; - u8 irq; -}; - -static int dma_controller_start(struct dma_controller *c) -{ - /* nothing to do */ - return 0; -} - -static void dma_channel_release(struct dma_channel *pChannel); - -static int dma_controller_stop(struct dma_controller *c) -{ - struct musb_dma_controller *controller = - container_of(c, struct musb_dma_controller, Controller); - struct musb *musb = (struct musb *) controller->pDmaPrivate; - struct dma_channel *pChannel; - u8 bBit; - - if (controller->bmUsedChannels != 0) { - dev_err(musb->controller, - "Stopping DMA controller while channel active\n"); - - for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { - if (controller->bmUsedChannels & (1 << bBit)) { - pChannel = &controller->aChannel[bBit].Channel; - dma_channel_release(pChannel); - - if (!controller->bmUsedChannels) - break; - } - } - } - return 0; -} - -static struct dma_channel *dma_channel_allocate(struct dma_controller *c, - struct musb_hw_ep *hw_ep, u8 transmit) -{ - u8 bBit; - struct dma_channel *pChannel = NULL; - struct musb_dma_channel *pImplChannel = NULL; - struct musb_dma_controller *controller = - container_of(c, struct musb_dma_controller, Controller); - - for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { - if (!(controller->bmUsedChannels & (1 << bBit))) { - controller->bmUsedChannels |= (1 << bBit); - pImplChannel = &(controller->aChannel[bBit]); - pImplChannel->controller = controller; - pImplChannel->bIndex = bBit; - pImplChannel->epnum = hw_ep->epnum; - pImplChannel->transmit = transmit; - pChannel = &(pImplChannel->Channel); - pChannel->private_data = pImplChannel; - pChannel->status = MUSB_DMA_STATUS_FREE; - pChannel->max_len = 0x10000; - /* Tx => mode 1; Rx => mode 0 */ - pChannel->desired_mode = transmit; - pChannel->actual_len = 0; - break; - } - } - return pChannel; -} - -static void dma_channel_release(struct dma_channel *pChannel) -{ - struct musb_dma_channel *pImplChannel = - (struct musb_dma_channel *) pChannel->private_data; - - pChannel->actual_len = 0; - pImplChannel->dwStartAddress = 0; - pImplChannel->len = 0; - - pImplChannel->controller->bmUsedChannels &= - ~(1 << 
pImplChannel->bIndex); - - pChannel->status = MUSB_DMA_STATUS_UNKNOWN; -} - -static void configure_channel(struct dma_channel *pChannel, - u16 packet_sz, u8 mode, - dma_addr_t dma_addr, u32 len) -{ - struct musb_dma_channel *pImplChannel = - (struct musb_dma_channel *) pChannel->private_data; - struct musb_dma_controller *controller = pImplChannel->controller; - void __iomem *mbase = controller->pCoreBase; - u8 bChannel = pImplChannel->bIndex; - u16 csr = 0; - - DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", - pChannel, packet_sz, dma_addr, len, mode); - - if (mode) { - csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; - BUG_ON(len < packet_sz); - - if (packet_sz >= 64) { - csr |= MUSB_HSDMA_BURSTMODE_INCR16 - << MUSB_HSDMA_BURSTMODE_SHIFT; - } else if (packet_sz >= 32) { - csr |= MUSB_HSDMA_BURSTMODE_INCR8 - << MUSB_HSDMA_BURSTMODE_SHIFT; - } else if (packet_sz >= 16) { - csr |= MUSB_HSDMA_BURSTMODE_INCR4 - << MUSB_HSDMA_BURSTMODE_SHIFT; - } - } - - csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) - | (1 << MUSB_HSDMA_ENABLE_SHIFT) - | (1 << MUSB_HSDMA_IRQENABLE_SHIFT) - | (pImplChannel->transmit - ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT) - : 0); - - /* address/count */ - musb_writel(mbase, - MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), - dma_addr); - musb_writel(mbase, - MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), - len); - - /* control (this should start things) */ - musb_writew(mbase, - MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), - csr); -} - -static int dma_channel_program(struct dma_channel *pChannel, - u16 packet_sz, u8 mode, - dma_addr_t dma_addr, u32 len) -{ - struct musb_dma_channel *pImplChannel = - (struct musb_dma_channel *) pChannel->private_data; - - DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", - pImplChannel->epnum, - pImplChannel->transmit ? 
"Tx" : "Rx", - packet_sz, dma_addr, len, mode); - - BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN || - pChannel->status == MUSB_DMA_STATUS_BUSY); - - pChannel->actual_len = 0; - pImplChannel->dwStartAddress = dma_addr; - pImplChannel->len = len; - pImplChannel->wMaxPacketSize = packet_sz; - pChannel->status = MUSB_DMA_STATUS_BUSY; - - if ((mode == 1) && (len >= packet_sz)) - configure_channel(pChannel, packet_sz, 1, dma_addr, len); - else - configure_channel(pChannel, packet_sz, 0, dma_addr, len); - - return true; -} - -static int dma_channel_abort(struct dma_channel *pChannel) -{ - struct musb_dma_channel *pImplChannel = - (struct musb_dma_channel *) pChannel->private_data; - u8 bChannel = pImplChannel->bIndex; - void __iomem *mbase = pImplChannel->controller->pCoreBase; - u16 csr; - - if (pChannel->status == MUSB_DMA_STATUS_BUSY) { - if (pImplChannel->transmit) { - - csr = musb_readw(mbase, - MUSB_EP_OFFSET(pImplChannel->epnum, - MUSB_TXCSR)); - csr &= ~(MUSB_TXCSR_AUTOSET | - MUSB_TXCSR_DMAENAB | - MUSB_TXCSR_DMAMODE); - musb_writew(mbase, - MUSB_EP_OFFSET(pImplChannel->epnum, - MUSB_TXCSR), - csr); - } else { - csr = musb_readw(mbase, - MUSB_EP_OFFSET(pImplChannel->epnum, - MUSB_RXCSR)); - csr &= ~(MUSB_RXCSR_AUTOCLEAR | - MUSB_RXCSR_DMAENAB | - MUSB_RXCSR_DMAMODE); - musb_writew(mbase, - MUSB_EP_OFFSET(pImplChannel->epnum, - MUSB_RXCSR), - csr); - } - - musb_writew(mbase, - MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), - 0); - musb_writel(mbase, - MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), - 0); - musb_writel(mbase, - MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), - 0); - - pChannel->status = MUSB_DMA_STATUS_FREE; - } - return 0; -} - -static irqreturn_t dma_controller_irq(int irq, void *private_data) -{ - struct musb_dma_controller *controller = - (struct musb_dma_controller *)private_data; - struct musb_dma_channel *pImplChannel; - struct musb *musb = controller->pDmaPrivate; - void __iomem *mbase = controller->pCoreBase; - struct dma_channel *pChannel; - u8 bChannel; - u16 csr; - u32 dwAddress; - u8 int_hsdma; - irqreturn_t retval = IRQ_NONE; - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); - - int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); - if (!int_hsdma) - goto done; - - for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) { - if (int_hsdma & (1 << bChannel)) { - pImplChannel = (struct musb_dma_channel *) - &(controller->aChannel[bChannel]); - pChannel = &pImplChannel->Channel; - - csr = musb_readw(mbase, - MUSB_HSDMA_CHANNEL_OFFSET(bChannel, - MUSB_HSDMA_CONTROL)); - - if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) - pImplChannel->Channel.status = - MUSB_DMA_STATUS_BUS_ABORT; - else { - u8 devctl; - - dwAddress = musb_readl(mbase, - MUSB_HSDMA_CHANNEL_OFFSET( - bChannel, - MUSB_HSDMA_ADDRESS)); - pChannel->actual_len = dwAddress - - pImplChannel->dwStartAddress; - - DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n", - pChannel, pImplChannel->dwStartAddress, - dwAddress, pChannel->actual_len, - pImplChannel->len, - (pChannel->actual_len - < pImplChannel->len) ? 
- "=> reconfig 0" : "=> complete"); - - devctl = musb_readb(mbase, MUSB_DEVCTL); - - pChannel->status = MUSB_DMA_STATUS_FREE; - - /* completed */ - if ((devctl & MUSB_DEVCTL_HM) - && (pImplChannel->transmit) - && ((pChannel->desired_mode == 0) - || (pChannel->actual_len & - (pImplChannel->wMaxPacketSize - 1))) - ) { - /* Send out the packet */ - musb_ep_select(mbase, - pImplChannel->epnum); - musb_writew(mbase, MUSB_EP_OFFSET( - pImplChannel->epnum, - MUSB_TXCSR), - MUSB_TXCSR_TXPKTRDY); - } else - musb_dma_completion( - musb, - pImplChannel->epnum, - pImplChannel->transmit); - } - } - } - retval = IRQ_HANDLED; -done: - spin_unlock_irqrestore(&musb->lock, flags); - return retval; -} - -void dma_controller_destroy(struct dma_controller *c) -{ - struct musb_dma_controller *controller; - - controller = container_of(c, struct musb_dma_controller, Controller); - if (!controller) - return; - - if (controller->irq) - free_irq(controller->irq, c); - - kfree(controller); -} - -struct dma_controller *__init -dma_controller_create(struct musb *musb, void __iomem *pCoreBase) -{ - struct musb_dma_controller *controller; - struct device *dev = musb->controller; - struct platform_device *pdev = to_platform_device(dev); - int irq = platform_get_irq(pdev, 1); - - if (irq == 0) { - dev_err(dev, "No DMA interrupt line!\n"); - return NULL; - } - - controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL); - if (!controller) - return NULL; - - controller->bChannelCount = MUSB_HSDMA_CHANNELS; - controller->pDmaPrivate = musb; - controller->pCoreBase = pCoreBase; - - controller->Controller.start = dma_controller_start; - controller->Controller.stop = dma_controller_stop; - controller->Controller.channel_alloc = dma_channel_allocate; - controller->Controller.channel_release = dma_channel_release; - controller->Controller.channel_program = dma_channel_program; - controller->Controller.channel_abort = dma_channel_abort; - - if (request_irq(irq, dma_controller_irq, IRQF_DISABLED, - musb->controller->bus_id, &controller->Controller)) { - dev_err(dev, "request_irq %d failed!\n", irq); - dma_controller_destroy(&controller->Controller); - return NULL; - } - - controller->irq = irq; - - return &controller->Controller; -} diff --git a/trunk/drivers/usb/musb/omap2430.c b/trunk/drivers/usb/musb/omap2430.c deleted file mode 100644 index 298b22e6ad0d..000000000000 --- a/trunk/drivers/usb/musb/omap2430.c +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright (C) 2005-2007 by Texas Instruments - * Some code has been taken from tusb6010.c - * Copyrights for that are attributable to: - * Copyright (C) 2006 Nokia Corporation - * Jarkko Nikula - * Tony Lindgren - * - * This file is part of the Inventra Controller Driver for Linux. - * - * The Inventra Controller Driver for Linux is free software; you - * can redistribute it and/or modify it under the terms of the GNU - * General Public License version 2 as published by the Free Software - * Foundation. - * - * The Inventra Controller Driver for Linux is distributed in - * the hope that it will be useful, but WITHOUT ANY WARRANTY; - * without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public - * License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with The Inventra Controller Driver for Linux ; if not, - * write to the Free Software Foundation, Inc., 59 Temple Place, - * Suite 330, Boston, MA 02111-1307 USA - * - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "musb_core.h" -#include "omap2430.h" - -#ifdef CONFIG_ARCH_OMAP3430 -#define get_cpu_rev() 2 -#endif - -#define MUSB_TIMEOUT_A_WAIT_BCON 1100 - -static struct timer_list musb_idle_timer; - -static void musb_do_idle(unsigned long _musb) -{ - struct musb *musb = (void *)_musb; - unsigned long flags; - u8 power; - u8 devctl; - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - - spin_lock_irqsave(&musb->lock, flags); - - switch (musb->xceiv.state) { - case OTG_STATE_A_WAIT_BCON: - devctl &= ~MUSB_DEVCTL_SESSION; - musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) { - musb->xceiv.state = OTG_STATE_B_IDLE; - MUSB_DEV_MODE(musb); - } else { - musb->xceiv.state = OTG_STATE_A_IDLE; - MUSB_HST_MODE(musb); - } - break; -#ifdef CONFIG_USB_MUSB_HDRC_HCD - case OTG_STATE_A_SUSPEND: - /* finish RESUME signaling? */ - if (musb->port1_status & MUSB_PORT_STAT_RESUME) { - power = musb_readb(musb->mregs, MUSB_POWER); - power &= ~MUSB_POWER_RESUME; - DBG(1, "root port resume stopped, power %02x\n", power); - musb_writeb(musb->mregs, MUSB_POWER, power); - musb->is_active = 1; - musb->port1_status &= ~(USB_PORT_STAT_SUSPEND - | MUSB_PORT_STAT_RESUME); - musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; - usb_hcd_poll_rh_status(musb_to_hcd(musb)); - /* NOTE: it might really be A_WAIT_BCON ... */ - musb->xceiv.state = OTG_STATE_A_HOST; - } - break; -#endif -#ifdef CONFIG_USB_MUSB_HDRC_HCD - case OTG_STATE_A_HOST: - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (devctl & MUSB_DEVCTL_BDEVICE) - musb->xceiv.state = OTG_STATE_B_IDLE; - else - musb->xceiv.state = OTG_STATE_A_WAIT_BCON; -#endif - default: - break; - } - spin_unlock_irqrestore(&musb->lock, flags); -} - - -void musb_platform_try_idle(struct musb *musb, unsigned long timeout) -{ - unsigned long default_timeout = jiffies + msecs_to_jiffies(3); - static unsigned long last_timer; - - if (timeout == 0) - timeout = default_timeout; - - /* Never idle if active, or when VBUS timeout is not set as host */ - if (musb->is_active || ((musb->a_wait_bcon == 0) - && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { - DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); - del_timer(&musb_idle_timer); - last_timer = jiffies; - return; - } - - if (time_after(last_timer, timeout)) { - if (!timer_pending(&musb_idle_timer)) - last_timer = timeout; - else { - DBG(4, "Longer idle timer already pending, ignoring\n"); - return; - } - } - last_timer = timeout; - - DBG(4, "%s inactive, for idle timer for %lu ms\n", - otg_state_string(musb), - (unsigned long)jiffies_to_msecs(timeout - jiffies)); - mod_timer(&musb_idle_timer, timeout); -} - -void musb_platform_enable(struct musb *musb) -{ -} -void musb_platform_disable(struct musb *musb) -{ -} -static void omap_vbus_power(struct musb *musb, int is_on, int sleeping) -{ -} - -static void omap_set_vbus(struct musb *musb, int is_on) -{ - u8 devctl; - /* HDRC controls CPEN, but beware current surges during device - * connect. They can trigger transient overcurrent conditions - * that must be ignored. 
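For reference, the retrigger rule in musb_platform_try_idle() above reduces to: a new idle deadline only replaces an already armed timer if it is not shorter. A minimal standalone sketch of that decision, with plain integers standing in for jiffies and time_after() wraparound handling ignored:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified model of the rule in musb_platform_try_idle(): a new
	 * idle deadline only replaces a pending one if it is not earlier,
	 * so an already-armed longer timer never gets cut short. */
	static bool accept_new_deadline(unsigned long pending_deadline,
					unsigned long new_deadline,
					bool timer_armed)
	{
		if (timer_armed && new_deadline < pending_deadline)
			return false;	/* longer idle timer already pending */
		return true;
	}

	int main(void)
	{
		printf("%d\n", accept_new_deadline(1000, 900, true));	/* 0: keep the longer timer */
		printf("%d\n", accept_new_deadline(1000, 1200, true));	/* 1: extend the deadline */
		printf("%d\n", accept_new_deadline(1000, 900, false));	/* 1: nothing pending, arm it */
		return 0;
	}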
- */ - - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - - if (is_on) { - musb->is_active = 1; - musb->xceiv.default_a = 1; - musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; - devctl |= MUSB_DEVCTL_SESSION; - - MUSB_HST_MODE(musb); - } else { - musb->is_active = 0; - - /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and - * jumping right to B_IDLE... - */ - - musb->xceiv.default_a = 0; - musb->xceiv.state = OTG_STATE_B_IDLE; - devctl &= ~MUSB_DEVCTL_SESSION; - - MUSB_DEV_MODE(musb); - } - musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); - - DBG(1, "VBUS %s, devctl %02x " - /* otg %3x conf %08x prcm %08x */ "\n", - otg_state_string(musb), - musb_readb(musb->mregs, MUSB_DEVCTL)); -} -static int omap_set_power(struct otg_transceiver *x, unsigned mA) -{ - return 0; -} - -static int musb_platform_resume(struct musb *musb); - -void musb_platform_set_mode(struct musb *musb, u8 musb_mode) -{ - u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - - devctl |= MUSB_DEVCTL_SESSION; - musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); - - switch (musb_mode) { - case MUSB_HOST: - otg_set_host(&musb->xceiv, musb->xceiv.host); - break; - case MUSB_PERIPHERAL: - otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget); - break; - case MUSB_OTG: - break; - } -} - -int __init musb_platform_init(struct musb *musb) -{ - u32 l; - -#if defined(CONFIG_ARCH_OMAP2430) - omap_cfg_reg(AE5_2430_USB0HS_STP); -#endif - - musb_platform_resume(musb); - - l = omap_readl(OTG_SYSCONFIG); - l &= ~ENABLEWAKEUP; /* disable wakeup */ - l &= ~NOSTDBY; /* remove possible nostdby */ - l |= SMARTSTDBY; /* enable smart standby */ - l &= ~AUTOIDLE; /* disable auto idle */ - l &= ~NOIDLE; /* remove possible noidle */ - l |= SMARTIDLE; /* enable smart idle */ - l |= AUTOIDLE; /* enable auto idle */ - omap_writel(l, OTG_SYSCONFIG); - - l = omap_readl(OTG_INTERFSEL); - l |= ULPI_12PIN; - omap_writel(l, OTG_INTERFSEL); - - pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " - "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", - omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG), - omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL), - omap_readl(OTG_SIMENABLE)); - - omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1); - - if (is_host_enabled(musb)) - musb->board_set_vbus = omap_set_vbus; - if (is_peripheral_enabled(musb)) - musb->xceiv.set_power = omap_set_power; - musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON; - - setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); - - return 0; -} - -int musb_platform_suspend(struct musb *musb) -{ - u32 l; - - if (!musb->clock) - return 0; - - /* in any role */ - l = omap_readl(OTG_FORCESTDBY); - l |= ENABLEFORCE; /* enable MSTANDBY */ - omap_writel(l, OTG_FORCESTDBY); - - l = omap_readl(OTG_SYSCONFIG); - l |= ENABLEWAKEUP; /* enable wakeup */ - omap_writel(l, OTG_SYSCONFIG); - - if (musb->xceiv.set_suspend) - musb->xceiv.set_suspend(&musb->xceiv, 1); - - if (musb->set_clock) - musb->set_clock(musb->clock, 0); - else - clk_disable(musb->clock); - - return 0; -} - -static int musb_platform_resume(struct musb *musb) -{ - u32 l; - - if (!musb->clock) - return 0; - - if (musb->xceiv.set_suspend) - musb->xceiv.set_suspend(&musb->xceiv, 0); - - if (musb->set_clock) - musb->set_clock(musb->clock, 1); - else - clk_enable(musb->clock); - - l = omap_readl(OTG_SYSCONFIG); - l &= ~ENABLEWAKEUP; /* disable wakeup */ - omap_writel(l, OTG_SYSCONFIG); - - l = omap_readl(OTG_FORCESTDBY); - l &= ~ENABLEFORCE; /* disable MSTANDBY */ - omap_writel(l, OTG_FORCESTDBY); - - return 0; -} - - -int 
musb_platform_exit(struct musb *musb) -{ - - omap_vbus_power(musb, 0 /*off*/, 1); - - musb_platform_suspend(musb); - - clk_put(musb->clock); - musb->clock = 0; - - return 0; -} diff --git a/trunk/drivers/usb/musb/omap2430.h b/trunk/drivers/usb/musb/omap2430.h deleted file mode 100644 index 786a62071f72..000000000000 --- a/trunk/drivers/usb/musb/omap2430.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2005-2006 by Texas Instruments - * - * The Inventra Controller Driver for Linux is free software; you - * can redistribute it and/or modify it under the terms of the GNU - * General Public License version 2 as published by the Free Software - * Foundation. - */ - -#ifndef __MUSB_OMAP243X_H__ -#define __MUSB_OMAP243X_H__ - -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) -#include -#include - -/* - * OMAP2430-specific definitions - */ - -#define MENTOR_BASE_OFFSET 0 -#if defined(CONFIG_ARCH_OMAP2430) -#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE) -#elif defined(CONFIG_ARCH_OMAP3430) -#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE) -#endif -#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset)) -#define OTG_REVISION OMAP_HSOTG(0x0) -#define OTG_SYSCONFIG OMAP_HSOTG(0x4) -# define MIDLEMODE 12 /* bit position */ -# define FORCESTDBY (0 << MIDLEMODE) -# define NOSTDBY (1 << MIDLEMODE) -# define SMARTSTDBY (2 << MIDLEMODE) -# define SIDLEMODE 3 /* bit position */ -# define FORCEIDLE (0 << SIDLEMODE) -# define NOIDLE (1 << SIDLEMODE) -# define SMARTIDLE (2 << SIDLEMODE) -# define ENABLEWAKEUP (1 << 2) -# define SOFTRST (1 << 1) -# define AUTOIDLE (1 << 0) -#define OTG_SYSSTATUS OMAP_HSOTG(0x8) -# define RESETDONE (1 << 0) -#define OTG_INTERFSEL OMAP_HSOTG(0xc) -# define EXTCP (1 << 2) -# define PHYSEL 0 /* bit position */ -# define UTMI_8BIT (0 << PHYSEL) -# define ULPI_12PIN (1 << PHYSEL) -# define ULPI_8PIN (2 << PHYSEL) -#define OTG_SIMENABLE OMAP_HSOTG(0x10) -# define TM1 (1 << 0) -#define OTG_FORCESTDBY OMAP_HSOTG(0x14) -# define ENABLEFORCE (1 << 0) - -#endif /* CONFIG_ARCH_OMAP2430 */ - -#endif /* __MUSB_OMAP243X_H__ */ diff --git a/trunk/drivers/usb/musb/tusb6010.c b/trunk/drivers/usb/musb/tusb6010.c deleted file mode 100644 index b73b036f3d77..000000000000 --- a/trunk/drivers/usb/musb/tusb6010.c +++ /dev/null @@ -1,1151 +0,0 @@ -/* - * TUSB6010 USB 2.0 OTG Dual Role controller - * - * Copyright (C) 2006 Nokia Corporation - * Jarkko Nikula - * Tony Lindgren - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Notes: - * - Driver assumes that interface to external host (main CPU) is - * configured for NOR FLASH interface instead of VLYNQ serial - * interface. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "musb_core.h" - -static void tusb_source_power(struct musb *musb, int is_on); - -#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) -#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) - -/* - * Checks the revision. We need to use the DMA register as 3.0 does not - * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. 
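As a worked example of the revision decoding described above: a raw TUSB_DMA_CTRL_REV reading of 0x30 together with a die ID field of 2 (rev 3.1 silicon) decodes to revision 3.1. A small standalone sketch using the shift/mask values from tusb6010.h further below; the register values are invented sample data, not reads from hardware:

	#include <stdint.h>
	#include <stdio.h>

	#define TUSB_REV_MAJOR(reg_val)		(((reg_val) >> 4) & 0xf)
	#define TUSB_REV_MINOR(reg_val)		((reg_val) & 0xf)
	#define TUSB_DIDR1_HI_CHIP_REV(v)	(((v) >> 17) & 0xf)
	#define TUSB_DIDR1_HI_REV_31		2

	/* Decode a revision byte the way tusb_get_revision() does: a 3.0
	 * reading is promoted to 3.1 when the die ID says the silicon is
	 * actually rev 3.1. */
	static uint8_t decode_rev(uint32_t dma_ctrl_rev, uint32_t didr1_hi)
	{
		uint8_t rev = dma_ctrl_rev & 0xff;

		if (TUSB_REV_MAJOR(rev) == 3 &&
		    TUSB_DIDR1_HI_CHIP_REV(didr1_hi) >= TUSB_DIDR1_HI_REV_31)
			rev |= 1;

		return rev;
	}

	int main(void)
	{
		/* Sample values only: 0x30 in DMA_CTRL_REV, die ID says rev 3.1 */
		uint8_t rev = decode_rev(0x30, 2u << 17);

		printf("TUSB rev %u.%u\n", TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
		/* prints: TUSB rev 3.1 */
		return 0;
	}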
- */ -u8 tusb_get_revision(struct musb *musb) -{ - void __iomem *tbase = musb->ctrl_base; - u32 die_id; - u8 rev; - - rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff; - if (TUSB_REV_MAJOR(rev) == 3) { - die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, - TUSB_DIDR1_HI)); - if (die_id >= TUSB_DIDR1_HI_REV_31) - rev |= 1; - } - - return rev; -} - -static int __init tusb_print_revision(struct musb *musb) -{ - void __iomem *tbase = musb->ctrl_base; - u8 rev; - - rev = tusb_get_revision(musb); - - pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n", - "prcm", - TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)), - TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)), - "int", - TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), - TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), - "gpio", - TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)), - TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)), - "dma", - TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), - TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), - "dieid", - TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)), - "rev", - TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev)); - - return tusb_get_revision(musb); -} - -#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \ - | TUSB_PHY_OTG_CTRL_TESTM0) - -/* - * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0. - * Disables power detection in PHY for the duration of idle. - */ -static void tusb_wbus_quirk(struct musb *musb, int enabled) -{ - void __iomem *tbase = musb->ctrl_base; - static u32 phy_otg_ctrl, phy_otg_ena; - u32 tmp; - - if (enabled) { - phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); - phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); - tmp = TUSB_PHY_OTG_CTRL_WRPROTECT - | phy_otg_ena | WBUS_QUIRK_MASK; - musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); - tmp = phy_otg_ena & ~WBUS_QUIRK_MASK; - tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2; - musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); - DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n", - musb_readl(tbase, TUSB_PHY_OTG_CTRL), - musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); - } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE) - & TUSB_PHY_OTG_CTRL_TESTM2) { - tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl; - musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); - tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena; - musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); - DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n", - musb_readl(tbase, TUSB_PHY_OTG_CTRL), - musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); - phy_otg_ctrl = 0; - phy_otg_ena = 0; - } -} - -/* - * TUSB 6010 may use a parallel bus that doesn't support byte ops; - * so both loading and unloading FIFOs need explicit byte counts. 
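As a worked example of the explicit byte-count handling described above: an 11-byte buffer goes out as two full 32-bit words plus one padded tail word carrying 3 valid bytes, with the true length still programmed separately through TUSB_EP_CONFIG_XFR_SIZE. A minimal sketch of that split in plain C with no hardware access; the buffer contents are arbitrary sample data:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Illustrative only: show how a 'len'-byte buffer is carved up for a
	 * FIFO that accepts nothing but whole 32-bit writes, as the unaligned
	 * helpers do.  The chip learns how much of the last word is valid
	 * from the endpoint's XFR_SIZE field, not from the word itself. */
	static void show_split(const uint8_t *buf, uint16_t len)
	{
		uint16_t full_words = len >> 2;
		uint16_t tail = len & 0x03;
		uint32_t word = 0;

		if (tail) {
			/* Pack the trailing 1-3 bytes; the rest is padding */
			memcpy(&word, buf + (full_words << 2), tail);
		}
		printf("len %u -> %u full word(s) + %u tail byte(s), last word 0x%08x\n",
		       len, full_words, tail, word);
	}

	int main(void)
	{
		uint8_t buf[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };

		show_split(buf, sizeof(buf));	/* 2 full words + 3 tail bytes */
		return 0;
	}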
- */ - -static inline void -tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len) -{ - u32 val; - int i; - - if (len > 4) { - for (i = 0; i < (len >> 2); i++) { - memcpy(&val, buf, 4); - musb_writel(fifo, 0, val); - buf += 4; - } - len %= 4; - } - if (len > 0) { - /* Write the rest 1 - 3 bytes to FIFO */ - memcpy(&val, buf, len); - musb_writel(fifo, 0, val); - } -} - -static inline void tusb_fifo_read_unaligned(void __iomem *fifo, - void __iomem *buf, u16 len) -{ - u32 val; - int i; - - if (len > 4) { - for (i = 0; i < (len >> 2); i++) { - val = musb_readl(fifo, 0); - memcpy(buf, &val, 4); - buf += 4; - } - len %= 4; - } - if (len > 0) { - /* Read the rest 1 - 3 bytes from FIFO */ - val = musb_readl(fifo, 0); - memcpy(buf, &val, len); - } -} - -void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) -{ - void __iomem *ep_conf = hw_ep->conf; - void __iomem *fifo = hw_ep->fifo; - u8 epnum = hw_ep->epnum; - - prefetch(buf); - - DBG(4, "%cX ep%d fifo %p count %d buf %p\n", - 'T', epnum, fifo, len, buf); - - if (epnum) - musb_writel(ep_conf, TUSB_EP_TX_OFFSET, - TUSB_EP_CONFIG_XFR_SIZE(len)); - else - musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX | - TUSB_EP0_CONFIG_XFR_SIZE(len)); - - if (likely((0x01 & (unsigned long) buf) == 0)) { - - /* Best case is 32bit-aligned destination address */ - if ((0x02 & (unsigned long) buf) == 0) { - if (len >= 4) { - writesl(fifo, buf, len >> 2); - buf += (len & ~0x03); - len &= 0x03; - } - } else { - if (len >= 2) { - u32 val; - int i; - - /* Cannot use writesw, fifo is 32-bit */ - for (i = 0; i < (len >> 2); i++) { - val = (u32)(*(u16 *)buf); - buf += 2; - val |= (*(u16 *)buf) << 16; - buf += 2; - musb_writel(fifo, 0, val); - } - len &= 0x03; - } - } - } - - if (len > 0) - tusb_fifo_write_unaligned(fifo, buf, len); -} - -void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf) -{ - void __iomem *ep_conf = hw_ep->conf; - void __iomem *fifo = hw_ep->fifo; - u8 epnum = hw_ep->epnum; - - DBG(4, "%cX ep%d fifo %p count %d buf %p\n", - 'R', epnum, fifo, len, buf); - - if (epnum) - musb_writel(ep_conf, TUSB_EP_RX_OFFSET, - TUSB_EP_CONFIG_XFR_SIZE(len)); - else - musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len)); - - if (likely((0x01 & (unsigned long) buf) == 0)) { - - /* Best case is 32bit-aligned destination address */ - if ((0x02 & (unsigned long) buf) == 0) { - if (len >= 4) { - readsl(fifo, buf, len >> 2); - buf += (len & ~0x03); - len &= 0x03; - } - } else { - if (len >= 2) { - u32 val; - int i; - - /* Cannot use readsw, fifo is 32-bit */ - for (i = 0; i < (len >> 2); i++) { - val = musb_readl(fifo, 0); - *(u16 *)buf = (u16)(val & 0xffff); - buf += 2; - *(u16 *)buf = (u16)(val >> 16); - buf += 2; - } - len &= 0x03; - } - } - } - - if (len > 0) - tusb_fifo_read_unaligned(fifo, buf, len); -} - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - -/* This is used by gadget drivers, and OTG transceiver logic, allowing - * at most mA current to be drawn from VBUS during a Default-B session - * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host - * mode), or low power Default-B sessions, something else supplies power. - * Caller must take care of locking. - */ -static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) -{ - struct musb *musb = container_of(x, struct musb, xceiv); - void __iomem *tbase = musb->ctrl_base; - u32 reg; - - /* - * Keep clock active when enabled. Note that this is not tied to - * drawing VBUS, as with OTG mA can be less than musb->min_power. 
- */ - if (musb->set_clock) { - if (mA) - musb->set_clock(musb->clock, 1); - else - musb->set_clock(musb->clock, 0); - } - - /* tps65030 seems to consume max 100mA, with maybe 60mA available - * (measured on one board) for things other than tps and tusb. - * - * Boards sharing the CPU clock with CLKIN will need to prevent - * certain idle sleep states while the USB link is active. - * - * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }. - * The actual current usage would be very board-specific. For now, - * it's simpler to just use an aggregate (also board-specific). - */ - if (x->default_a || mA < (musb->min_power << 1)) - mA = 0; - - reg = musb_readl(tbase, TUSB_PRCM_MNGMT); - if (mA) { - musb->is_bus_powered = 1; - reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN; - } else { - musb->is_bus_powered = 0; - reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); - } - musb_writel(tbase, TUSB_PRCM_MNGMT, reg); - - DBG(2, "draw max %d mA VBUS\n", mA); - return 0; -} - -#else -#define tusb_draw_power NULL -#endif - -/* workaround for issue 13: change clock during chip idle - * (to be fixed in rev3 silicon) ... symptoms include disconnect - * or looping suspend/resume cycles - */ -static void tusb_set_clock_source(struct musb *musb, unsigned mode) -{ - void __iomem *tbase = musb->ctrl_base; - u32 reg; - - reg = musb_readl(tbase, TUSB_PRCM_CONF); - reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3); - - /* 0 = refclk (clkin, XI) - * 1 = PHY 60 MHz (internal PLL) - * 2 = not supported - * 3 = what? - */ - if (mode > 0) - reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3); - - musb_writel(tbase, TUSB_PRCM_CONF, reg); - - /* FIXME tusb6010_platform_retime(mode == 0); */ -} - -/* - * Idle TUSB6010 until next wake-up event; NOR access always wakes. - * Other code ensures that we idle unless we're connected _and_ the - * USB link is not suspended ... and tells us the relevant wakeup - * events. SW_EN for voltage is handled separately. - */ -void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) -{ - void __iomem *tbase = musb->ctrl_base; - u32 reg; - - if ((wakeup_enables & TUSB_PRCM_WBUS) - && (tusb_get_revision(musb) == TUSB_REV_30)) - tusb_wbus_quirk(musb, 1); - - tusb_set_clock_source(musb, 0); - - wakeup_enables |= TUSB_PRCM_WNORCS; - musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables); - - /* REVISIT writeup of WID implies that if WID set and ID is grounded, - * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared. - * Presumably that's mostly to save power, hence WID is immaterial ... - */ - - reg = musb_readl(tbase, TUSB_PRCM_MNGMT); - /* issue 4: when driving vbus, use hipower (vbus_det) comparator */ - if (is_host_active(musb)) { - reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; - reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN; - } else { - reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN; - reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; - } - reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE; - musb_writel(tbase, TUSB_PRCM_MNGMT, reg); - - DBG(6, "idle, wake on %02x\n", wakeup_enables); -} - -/* - * Updates cable VBUS status. Caller must take care of locking. - */ -int musb_platform_get_vbus_status(struct musb *musb) -{ - void __iomem *tbase = musb->ctrl_base; - u32 otg_stat, prcm_mngmt; - int ret = 0; - - otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); - prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT); - - /* Temporarily enable VBUS detection if it was disabled for - * suspend mode. Unless it's enabled otg_stat and devctl will - * not show correct VBUS state. 
- */ - if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) { - u32 tmp = prcm_mngmt; - tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; - musb_writel(tbase, TUSB_PRCM_MNGMT, tmp); - otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); - musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt); - } - - if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) - ret = 1; - - return ret; -} - -static struct timer_list musb_idle_timer; - -static void musb_do_idle(unsigned long _musb) -{ - struct musb *musb = (void *)_musb; - unsigned long flags; - - spin_lock_irqsave(&musb->lock, flags); - - switch (musb->xceiv.state) { - case OTG_STATE_A_WAIT_BCON: - if ((musb->a_wait_bcon != 0) - && (musb->idle_timeout == 0 - || time_after(jiffies, musb->idle_timeout))) { - DBG(4, "Nothing connected %s, turning off VBUS\n", - otg_state_string(musb)); - } - /* FALLTHROUGH */ - case OTG_STATE_A_IDLE: - tusb_source_power(musb, 0); - default: - break; - } - - if (!musb->is_active) { - u32 wakeups; - - /* wait until khubd handles port change status */ - if (is_host_active(musb) && (musb->port1_status >> 16)) - goto done; - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - if (is_peripheral_enabled(musb) && !musb->gadget_driver) - wakeups = 0; - else { - wakeups = TUSB_PRCM_WHOSTDISCON - | TUSB_PRCM_WBUS - | TUSB_PRCM_WVBUS; - if (is_otg_enabled(musb)) - wakeups |= TUSB_PRCM_WID; - } -#else - wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS; -#endif - tusb_allow_idle(musb, wakeups); - } -done: - spin_unlock_irqrestore(&musb->lock, flags); -} - -/* - * Maybe put TUSB6010 into idle mode mode depending on USB link status, - * like "disconnected" or "suspended". We'll be woken out of it by - * connect, resume, or disconnect. - * - * Needs to be called as the last function everywhere where there is - * register access to TUSB6010 because of NOR flash wake-up. - * Caller should own controller spinlock. - * - * Delay because peripheral enables D+ pullup 3msec after SE0, and - * we don't want to treat that full speed J as a wakeup event. - * ... peripherals must draw only suspend current after 10 msec. - */ -void musb_platform_try_idle(struct musb *musb, unsigned long timeout) -{ - unsigned long default_timeout = jiffies + msecs_to_jiffies(3); - static unsigned long last_timer; - - if (timeout == 0) - timeout = default_timeout; - - /* Never idle if active, or when VBUS timeout is not set as host */ - if (musb->is_active || ((musb->a_wait_bcon == 0) - && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { - DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); - del_timer(&musb_idle_timer); - last_timer = jiffies; - return; - } - - if (time_after(last_timer, timeout)) { - if (!timer_pending(&musb_idle_timer)) - last_timer = timeout; - else { - DBG(4, "Longer idle timer already pending, ignoring\n"); - return; - } - } - last_timer = timeout; - - DBG(4, "%s inactive, for idle timer for %lu ms\n", - otg_state_string(musb), - (unsigned long)jiffies_to_msecs(timeout - jiffies)); - mod_timer(&musb_idle_timer, timeout); -} - -/* ticks of 60 MHz clock */ -#define DEVCLOCK 60000000 -#define OTG_TIMER_MS(msecs) ((msecs) \ - ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \ - | TUSB_DEV_OTG_TIMER_ENABLE) \ - : 0) - -static void tusb_source_power(struct musb *musb, int is_on) -{ - void __iomem *tbase = musb->ctrl_base; - u32 conf, prcm, timer; - u8 devctl; - - /* HDRC controls CPEN, but beware current surges during device - * connect. They can trigger transient overcurrent conditions - * that must be ignored. 
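As a worked example of the OTG_TIMER_MS() conversion above: an assumed 100 ms delay (an arbitrary sample figure, not the real OTG_TIME_A_WAIT_VRISE value from musb_core.h) becomes 6,000,000 ticks of the 60 MHz device clock with the enable bit set on top. A standalone sketch, reusing the TUSB_DEV_OTG_TIMER_* definitions from tusb6010.h further below:

	#include <stdint.h>
	#include <stdio.h>

	#define DEVCLOCK			60000000
	#define TUSB_DEV_OTG_TIMER_ENABLE	(1u << 31)
	#define TUSB_DEV_OTG_TIMER_VAL(v)	((v) & 0x07ffffff)
	#define OTG_TIMER_MS(msecs)	((msecs) \
		? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK / 1000) * (msecs)) \
			| TUSB_DEV_OTG_TIMER_ENABLE) \
		: 0)

	int main(void)
	{
		/* 100 ms at 60 MHz = 6,000,000 ticks = 0x005b8d80, plus bit 31 */
		printf("timer reg = 0x%08x\n", OTG_TIMER_MS(100));
		/* prints: timer reg = 0x805b8d80 */
		return 0;
	}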
- */ - - prcm = musb_readl(tbase, TUSB_PRCM_MNGMT); - conf = musb_readl(tbase, TUSB_DEV_CONF); - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - - if (is_on) { - if (musb->set_clock) - musb->set_clock(musb->clock, 1); - timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); - musb->xceiv.default_a = 1; - musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; - devctl |= MUSB_DEVCTL_SESSION; - - conf |= TUSB_DEV_CONF_USB_HOST_MODE; - MUSB_HST_MODE(musb); - } else { - u32 otg_stat; - - timer = 0; - - /* If ID pin is grounded, we want to be a_idle */ - otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); - if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) { - switch (musb->xceiv.state) { - case OTG_STATE_A_WAIT_VRISE: - case OTG_STATE_A_WAIT_BCON: - musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; - break; - case OTG_STATE_A_WAIT_VFALL: - musb->xceiv.state = OTG_STATE_A_IDLE; - break; - default: - musb->xceiv.state = OTG_STATE_A_IDLE; - } - musb->is_active = 0; - musb->xceiv.default_a = 1; - MUSB_HST_MODE(musb); - } else { - musb->is_active = 0; - musb->xceiv.default_a = 0; - musb->xceiv.state = OTG_STATE_B_IDLE; - MUSB_DEV_MODE(musb); - } - - devctl &= ~MUSB_DEVCTL_SESSION; - conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; - if (musb->set_clock) - musb->set_clock(musb->clock, 0); - } - prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); - - musb_writel(tbase, TUSB_PRCM_MNGMT, prcm); - musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer); - musb_writel(tbase, TUSB_DEV_CONF, conf); - musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); - - DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n", - otg_state_string(musb), - musb_readb(musb->mregs, MUSB_DEVCTL), - musb_readl(tbase, TUSB_DEV_OTG_STAT), - conf, prcm); -} - -/* - * Sets the mode to OTG, peripheral or host by changing the ID detection. - * Caller must take care of locking. - * - * Note that if a mini-A cable is plugged in the ID line will stay down as - * the weak ID pull-up is not able to pull the ID up. - * - * REVISIT: It would be possible to add support for changing between host - * and peripheral modes in non-OTG configurations by reconfiguring hardware - * and then setting musb->board_mode. For now, only support OTG mode. 
- */ -void musb_platform_set_mode(struct musb *musb, u8 musb_mode) -{ - void __iomem *tbase = musb->ctrl_base; - u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; - - if (musb->board_mode != MUSB_OTG) { - ERR("Changing mode currently only supported in OTG mode\n"); - return; - } - - otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); - phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); - phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); - dev_conf = musb_readl(tbase, TUSB_DEV_CONF); - - switch (musb_mode) { - -#ifdef CONFIG_USB_MUSB_HDRC_HCD - case MUSB_HOST: /* Disable PHY ID detect, ground ID */ - phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - dev_conf |= TUSB_DEV_CONF_ID_SEL; - dev_conf &= ~TUSB_DEV_CONF_SOFT_ID; - break; -#endif - -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */ - phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); - break; -#endif - -#ifdef CONFIG_USB_MUSB_OTG - case MUSB_OTG: /* Use PHY ID detection */ - phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); - break; -#endif - - default: - DBG(2, "Trying to set unknown mode %i\n", musb_mode); - } - - musb_writel(tbase, TUSB_PHY_OTG_CTRL, - TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl); - musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, - TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena); - musb_writel(tbase, TUSB_DEV_CONF, dev_conf); - - otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); - if ((musb_mode == MUSB_PERIPHERAL) && - !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) - INFO("Cannot be peripheral with mini-A cable " - "otg_stat: %08x\n", otg_stat); -} - -static inline unsigned long -tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) -{ - u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); - unsigned long idle_timeout = 0; - - /* ID pin */ - if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) { - int default_a; - - if (is_otg_enabled(musb)) - default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); - else - default_a = is_host_enabled(musb); - DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); - musb->xceiv.default_a = default_a; - tusb_source_power(musb, default_a); - - /* Don't allow idling immediately */ - if (default_a) - idle_timeout = jiffies + (HZ * 3); - } - - /* VBUS state change */ - if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { - - /* B-dev state machine: no vbus ~= disconnect */ - if ((is_otg_enabled(musb) && !musb->xceiv.default_a) - || !is_host_enabled(musb)) { -#ifdef CONFIG_USB_MUSB_HDRC_HCD - /* ? musb_root_disconnect(musb); */ - musb->port1_status &= - ~(USB_PORT_STAT_CONNECTION - | USB_PORT_STAT_ENABLE - | USB_PORT_STAT_LOW_SPEED - | USB_PORT_STAT_HIGH_SPEED - | USB_PORT_STAT_TEST - ); -#endif - - if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { - DBG(1, "Forcing disconnect (no interrupt)\n"); - if (musb->xceiv.state != OTG_STATE_B_IDLE) { - /* INTR_DISCONNECT can hide... 
*/ - musb->xceiv.state = OTG_STATE_B_IDLE; - musb->int_usb |= MUSB_INTR_DISCONNECT; - } - musb->is_active = 0; - } - DBG(2, "vbus change, %s, otg %03x\n", - otg_state_string(musb), otg_stat); - idle_timeout = jiffies + (1 * HZ); - schedule_work(&musb->irq_work); - - } else /* A-dev state machine */ { - DBG(2, "vbus change, %s, otg %03x\n", - otg_state_string(musb), otg_stat); - - switch (musb->xceiv.state) { - case OTG_STATE_A_IDLE: - DBG(2, "Got SRP, turning on VBUS\n"); - musb_set_vbus(musb, 1); - - /* CONNECT can wake if a_wait_bcon is set */ - if (musb->a_wait_bcon != 0) - musb->is_active = 0; - else - musb->is_active = 1; - - /* - * OPT FS A TD.4.6 needs few seconds for - * A_WAIT_VRISE - */ - idle_timeout = jiffies + (2 * HZ); - - break; - case OTG_STATE_A_WAIT_VRISE: - /* ignore; A-session-valid < VBUS_VALID/2, - * we monitor this with the timer - */ - break; - case OTG_STATE_A_WAIT_VFALL: - /* REVISIT this irq triggers during short - * spikes caused by enumeration ... - */ - if (musb->vbuserr_retry) { - musb->vbuserr_retry--; - tusb_source_power(musb, 1); - } else { - musb->vbuserr_retry - = VBUSERR_RETRY_COUNT; - tusb_source_power(musb, 0); - } - break; - default: - break; - } - } - } - - /* OTG timer expiration */ - if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) { - u8 devctl; - - DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat); - - switch (musb->xceiv.state) { - case OTG_STATE_A_WAIT_VRISE: - /* VBUS has probably been valid for a while now, - * but may well have bounced out of range a bit - */ - devctl = musb_readb(musb->mregs, MUSB_DEVCTL); - if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) { - if ((devctl & MUSB_DEVCTL_VBUS) - != MUSB_DEVCTL_VBUS) { - DBG(2, "devctl %02x\n", devctl); - break; - } - musb->xceiv.state = OTG_STATE_A_WAIT_BCON; - musb->is_active = 0; - idle_timeout = jiffies - + msecs_to_jiffies(musb->a_wait_bcon); - } else { - /* REVISIT report overcurrent to hub? */ - ERR("vbus too slow, devctl %02x\n", devctl); - tusb_source_power(musb, 0); - } - break; - case OTG_STATE_A_WAIT_BCON: - if (musb->a_wait_bcon != 0) - idle_timeout = jiffies - + msecs_to_jiffies(musb->a_wait_bcon); - break; - case OTG_STATE_A_SUSPEND: - break; - case OTG_STATE_B_WAIT_ACON: - break; - default: - break; - } - } - schedule_work(&musb->irq_work); - - return idle_timeout; -} - -static irqreturn_t tusb_interrupt(int irq, void *__hci) -{ - struct musb *musb = __hci; - void __iomem *tbase = musb->ctrl_base; - unsigned long flags, idle_timeout = 0; - u32 int_mask, int_src; - - spin_lock_irqsave(&musb->lock, flags); - - /* Mask all interrupts to allow using both edge and level GPIO irq */ - int_mask = musb_readl(tbase, TUSB_INT_MASK); - musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); - - int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS; - DBG(3, "TUSB IRQ %08x\n", int_src); - - musb->int_usb = (u8) int_src; - - /* Acknowledge wake-up source interrupts */ - if (int_src & TUSB_INT_SRC_DEV_WAKEUP) { - u32 reg; - u32 i; - - if (tusb_get_revision(musb) == TUSB_REV_30) - tusb_wbus_quirk(musb, 0); - - /* there are issues re-locking the PLL on wakeup ... 
*/ - - /* work around issue 8 */ - for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) { - musb_writel(tbase, TUSB_SCRATCH_PAD, 0); - musb_writel(tbase, TUSB_SCRATCH_PAD, i); - reg = musb_readl(tbase, TUSB_SCRATCH_PAD); - if (reg == i) - break; - DBG(6, "TUSB NOR not ready\n"); - } - - /* work around issue 13 (2nd half) */ - tusb_set_clock_source(musb, 1); - - reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE); - musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); - if (reg & ~TUSB_PRCM_WNORCS) { - musb->is_active = 1; - schedule_work(&musb->irq_work); - } - DBG(3, "wake %sactive %02x\n", - musb->is_active ? "" : "in", reg); - - /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */ - } - - if (int_src & TUSB_INT_SRC_USB_IP_CONN) - del_timer(&musb_idle_timer); - - /* OTG state change reports (annoyingly) not issued by Mentor core */ - if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG - | TUSB_INT_SRC_OTG_TIMEOUT - | TUSB_INT_SRC_ID_STATUS_CHNG)) - idle_timeout = tusb_otg_ints(musb, int_src, tbase); - - /* TX dma callback must be handled here, RX dma callback is - * handled in tusb_omap_dma_cb. - */ - if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) { - u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC); - u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK); - - DBG(3, "DMA IRQ %08x\n", dma_src); - real_dma_src = ~real_dma_src & dma_src; - if (tusb_dma_omap() && real_dma_src) { - int tx_source = (real_dma_src & 0xffff); - int i; - - for (i = 1; i <= 15; i++) { - if (tx_source & (1 << i)) { - DBG(3, "completing ep%i %s\n", i, "tx"); - musb_dma_completion(musb, i, 1); - } - } - } - musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src); - } - - /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */ - if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) { - u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC); - - musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src); - musb->int_rx = (((musb_src >> 16) & 0xffff) << 1); - musb->int_tx = (musb_src & 0xffff); - } else { - musb->int_rx = 0; - musb->int_tx = 0; - } - - if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff)) - musb_interrupt(musb); - - /* Acknowledge TUSB interrupts. Clear only non-reserved bits */ - musb_writel(tbase, TUSB_INT_SRC_CLEAR, - int_src & ~TUSB_INT_MASK_RESERVED_BITS); - - musb_platform_try_idle(musb, idle_timeout); - - musb_writel(tbase, TUSB_INT_MASK, int_mask); - spin_unlock_irqrestore(&musb->lock, flags); - - return IRQ_HANDLED; -} - -static int dma_off; - -/* - * Enables TUSB6010. Caller must take care of locking. - * REVISIT: - * - Check what is unnecessary in MGC_HdrcStart() - */ -void musb_platform_enable(struct musb *musb) -{ - void __iomem *tbase = musb->ctrl_base; - - /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF. 
- * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */ - musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF); - - /* Setup TUSB interrupt, disable DMA and GPIO interrupts */ - musb_writel(tbase, TUSB_USBIP_INT_MASK, 0); - musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); - musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); - - /* Clear all subsystem interrups */ - musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff); - musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff); - musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff); - - /* Acknowledge pending interrupt(s) */ - musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS); - - /* Only 0 clock cycles for minimum interrupt de-assertion time and - * interrupt polarity active low seems to work reliably here */ - musb_writel(tbase, TUSB_INT_CTRL_CONF, - TUSB_INT_CTRL_CONF_INT_RELCYC(0)); - - set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); - - /* maybe force into the Default-A OTG state machine */ - if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) - & TUSB_DEV_OTG_STAT_ID_STATUS)) - musb_writel(tbase, TUSB_INT_SRC_SET, - TUSB_INT_SRC_ID_STATUS_CHNG); - - if (is_dma_capable() && dma_off) - printk(KERN_WARNING "%s %s: dma not reactivated\n", - __FILE__, __func__); - else - dma_off = 1; -} - -/* - * Disables TUSB6010. Caller must take care of locking. - */ -void musb_platform_disable(struct musb *musb) -{ - void __iomem *tbase = musb->ctrl_base; - - /* FIXME stop DMA, IRQs, timers, ... */ - - /* disable all IRQs */ - musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); - musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff); - musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); - musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); - - del_timer(&musb_idle_timer); - - if (is_dma_capable() && !dma_off) { - printk(KERN_WARNING "%s %s: dma still active\n", - __FILE__, __func__); - dma_off = 1; - } -} - -/* - * Sets up TUSB6010 CPU interface specific signals and registers - * Note: Settings optimized for OMAP24xx - */ -static void __init tusb_setup_cpu_interface(struct musb *musb) -{ - void __iomem *tbase = musb->ctrl_base; - - /* - * Disable GPIO[5:0] pullups (used as output DMA requests) - * Don't disable GPIO[7:6] as they are needed for wake-up. 
- */ - musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F); - - /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */ - musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF); - - /* Turn GPIO[5:0] to DMAREQ[5:0] signals */ - musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f)); - - /* Burst size 16x16 bits, all six DMA requests enabled, DMA request - * de-assertion time 2 system clocks p 62 */ - musb_writel(tbase, TUSB_DMA_REQ_CONF, - TUSB_DMA_REQ_CONF_BURST_SIZE(2) | - TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) | - TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); - - /* Set 0 wait count for synchronous burst access */ - musb_writel(tbase, TUSB_WAIT_COUNT, 1); -} - -static int __init tusb_start(struct musb *musb) -{ - void __iomem *tbase = musb->ctrl_base; - int ret = 0; - unsigned long flags; - u32 reg; - - if (musb->board_set_power) - ret = musb->board_set_power(1); - if (ret != 0) { - printk(KERN_ERR "tusb: Cannot enable TUSB6010\n"); - return ret; - } - - spin_lock_irqsave(&musb->lock, flags); - - if (musb_readl(tbase, TUSB_PROD_TEST_RESET) != - TUSB_PROD_TEST_RESET_VAL) { - printk(KERN_ERR "tusb: Unable to detect TUSB6010\n"); - goto err; - } - - ret = tusb_print_revision(musb); - if (ret < 2) { - printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n", - ret); - goto err; - } - - /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when - * NOR FLASH interface is used */ - musb_writel(tbase, TUSB_VLYNQ_CTRL, 8); - - /* Select PHY free running 60MHz as a system clock */ - tusb_set_clock_source(musb, 1); - - /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for - * power saving, enable VBus detect and session end comparators, - * enable IDpullup, enable VBus charging */ - musb_writel(tbase, TUSB_PRCM_MNGMT, - TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) | - TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN | - TUSB_PRCM_MNGMT_OTG_SESS_END_EN | - TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN | - TUSB_PRCM_MNGMT_OTG_ID_PULLUP); - tusb_setup_cpu_interface(musb); - - /* simplify: always sense/pullup ID pins, as if in OTG mode */ - reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); - reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg); - - reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL); - reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; - musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg); - - spin_unlock_irqrestore(&musb->lock, flags); - - return 0; - -err: - spin_unlock_irqrestore(&musb->lock, flags); - - if (musb->board_set_power) - musb->board_set_power(0); - - return -ENODEV; -} - -int __init musb_platform_init(struct musb *musb) -{ - struct platform_device *pdev; - struct resource *mem; - void __iomem *sync; - int ret; - - pdev = to_platform_device(musb->controller); - - /* dma address for async dma */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - musb->async = mem->start; - - /* dma address for sync dma */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!mem) { - pr_debug("no sync dma resource?\n"); - return -ENODEV; - } - musb->sync = mem->start; - - sync = ioremap(mem->start, mem->end - mem->start + 1); - if (!sync) { - pr_debug("ioremap for sync failed\n"); - return -ENOMEM; - } - musb->sync_va = sync; - - /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400, - * FIFOs at 0x600, TUSB at 0x800 - */ - musb->mregs += TUSB_BASE_OFFSET; - - ret = tusb_start(musb); - if (ret) { - printk(KERN_ERR "Could not start tusb6010 (%d)\n", - ret); - return -ENODEV; - } - musb->isr = tusb_interrupt; - - if 
(is_host_enabled(musb)) - musb->board_set_vbus = tusb_source_power; - if (is_peripheral_enabled(musb)) - musb->xceiv.set_power = tusb_draw_power; - - setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); - - return ret; -} - -int musb_platform_exit(struct musb *musb) -{ - del_timer_sync(&musb_idle_timer); - - if (musb->board_set_power) - musb->board_set_power(0); - - iounmap(musb->sync_va); - - return 0; -} diff --git a/trunk/drivers/usb/musb/tusb6010.h b/trunk/drivers/usb/musb/tusb6010.h deleted file mode 100644 index ab8c96286ce6..000000000000 --- a/trunk/drivers/usb/musb/tusb6010.h +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller - * - * Copyright (C) 2006 Nokia Corporation - * Jarkko Nikula - * Tony Lindgren - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __TUSB6010_H__ -#define __TUSB6010_H__ - -extern u8 tusb_get_revision(struct musb *musb); - -#ifdef CONFIG_USB_TUSB6010 -#define musb_in_tusb() 1 -#else -#define musb_in_tusb() 0 -#endif - -#ifdef CONFIG_USB_TUSB_OMAP_DMA -#define tusb_dma_omap() 1 -#else -#define tusb_dma_omap() 0 -#endif - -/* VLYNQ control register. 32-bit at offset 0x000 */ -#define TUSB_VLYNQ_CTRL 0x004 - -/* Mentor Graphics OTG core registers. 8,- 16- and 32-bit at offset 0x400 */ -#define TUSB_BASE_OFFSET 0x400 - -/* FIFO registers 32-bit at offset 0x600 */ -#define TUSB_FIFO_BASE 0x600 - -/* Device System & Control registers. 32-bit at offset 0x800 */ -#define TUSB_SYS_REG_BASE 0x800 - -#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000) -#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16) -#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15) -#define TUSB_DEV_CONF_SOFT_ID (1 << 1) -#define TUSB_DEV_CONF_ID_SEL (1 << 0) - -#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004) -#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008) -#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24) -#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23) -#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19) -#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18) -#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17) -#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16) -#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15) -#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14) -#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13) -#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12) -#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11) -#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10) -#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9) -#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7) -#define TUSB_PHY_OTG_CTRL_PD (1 << 6) -#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5) -#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4) -#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3) -#define TUSB_PHY_OTG_CTRL_RESET (1 << 2) -#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1) -#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0) - -/*OTG status register */ -#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c) -#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8) -#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7) -#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6) -#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5) -#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4) -#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3) -#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2) -#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0) -#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1) -#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0) - -#define 
TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010) -# define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31) -# define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff) -#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014) - -/* PRCM configuration register */ -#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018) -#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24) -#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16) - -/* PRCM management register */ -#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c) -#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25) -#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24) -#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20) -#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19) -#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18) -#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17) -#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10) -#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9) -#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8) -#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4) -#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3) -#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2) -#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1) -#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0) - -/* Wake-up source clear and mask registers */ -#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020) -#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028) -#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c) -#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13) -#define TUSB_PRCM_WGPIO_7 (1 << 12) -#define TUSB_PRCM_WGPIO_6 (1 << 11) -#define TUSB_PRCM_WGPIO_5 (1 << 10) -#define TUSB_PRCM_WGPIO_4 (1 << 9) -#define TUSB_PRCM_WGPIO_3 (1 << 8) -#define TUSB_PRCM_WGPIO_2 (1 << 7) -#define TUSB_PRCM_WGPIO_1 (1 << 6) -#define TUSB_PRCM_WGPIO_0 (1 << 5) -#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */ -#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */ -#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */ -#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */ -#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */ - -#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030) -#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034) -#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038) -#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c) -#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040) -#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044) -#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048) -#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c) -#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050) -#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054) -#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058) -#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c) -#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060) -#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064) -#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068) -#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c) - -/* NOR flash interrupt source registers */ -#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070) -#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074) -#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078) -#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c) -#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24) -#define TUSB_INT_SRC_USB_IP_CORE (1 << 17) -#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16) -#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15) -#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14) -#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13) -#define TUSB_INT_SRC_DEV_READY (1 << 12) -#define TUSB_INT_SRC_USB_IP_TX (1 << 9) -#define TUSB_INT_SRC_USB_IP_RX 
(1 << 8) -#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7) -#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6) -#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5) -#define TUSB_INT_SRC_USB_IP_CONN (1 << 4) -#define TUSB_INT_SRC_USB_IP_SOF (1 << 3) -#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2) -#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1) -#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0) - -/* NOR flash interrupt registers reserved bits. Must be written as 0 */ -#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17) -#define TUSB_INT_MASK_RESERVED_13 (1 << 13) -#define TUSB_INT_MASK_RESERVED_8 (0xf << 8) -#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26) -#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18) -#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10) - -/* Reserved bits for NOR flash interrupt mask and clear register */ -#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \ - TUSB_INT_MASK_RESERVED_13 | \ - TUSB_INT_MASK_RESERVED_8) - -/* Reserved bits for NOR flash interrupt status register */ -#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \ - TUSB_INT_SRC_RESERVED_18 | \ - TUSB_INT_SRC_RESERVED_10) - -#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080) -#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084) -#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100) -#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104) -#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108) -#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148) - -/* Offsets from each ep base register */ -#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */ -#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */ -#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188 - -#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8) -#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4) -#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8) - -/* Device System & Control register bitfields */ -#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18) -#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17) -#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16) -#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24) -#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26) -#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20) -#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16) -#define TUSB_EP0_CONFIG_SW_EN (1 << 8) -#define TUSB_EP0_CONFIG_DIR_TX (1 << 7) -#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f) -#define TUSB_EP_CONFIG_SW_EN (1 << 31) -#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff) -#define TUSB_PROD_TEST_RESET_VAL 0xa596 -#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20) - -#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8) -#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc) -#define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf) -#define TUSB_DIDR1_HI_REV_20 0 -#define TUSB_DIDR1_HI_REV_30 1 -#define TUSB_DIDR1_HI_REV_31 2 - -#define TUSB_REV_10 0x10 -#define TUSB_REV_20 0x20 -#define TUSB_REV_30 0x30 -#define TUSB_REV_31 0x31 - -#endif /* __TUSB6010_H__ */ diff --git a/trunk/drivers/usb/musb/tusb6010_omap.c b/trunk/drivers/usb/musb/tusb6010_omap.c deleted file mode 100644 index 52f7f29cebda..000000000000 --- a/trunk/drivers/usb/musb/tusb6010_omap.c +++ /dev/null @@ -1,719 +0,0 @@ -/* - * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface - * - * Copyright (C) 2006 Nokia Corporation - * Tony Lindgren - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
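For reference, tusb_interrupt() earlier masks each raw TUSB_INT_SRC read with ~TUSB_INT_SRC_RESERVED_BITS before acting on it, and acknowledges with the same filter. A small standalone sketch of what that mask works out to numerically, built from the reserved-bit definitions above; the raw register value is invented sample data:

	#include <stdint.h>
	#include <stdio.h>

	#define TUSB_INT_SRC_RESERVED_26	(0x1fu << 26)
	#define TUSB_INT_SRC_RESERVED_18	(0x3fu << 18)
	#define TUSB_INT_SRC_RESERVED_10	(0x03u << 10)

	#define TUSB_INT_SRC_RESERVED_BITS	(TUSB_INT_SRC_RESERVED_26 | \
						 TUSB_INT_SRC_RESERVED_18 | \
						 TUSB_INT_SRC_RESERVED_10)

	int main(void)
	{
		uint32_t raw_src = 0xffffffff;	/* pretend every bit came back set */

		printf("reserved mask = 0x%08x\n", TUSB_INT_SRC_RESERVED_BITS);	/* 0x7cfc0c00 */
		printf("usable bits   = 0x%08x\n", raw_src & ~TUSB_INT_SRC_RESERVED_BITS);	/* 0x8303f3ff */
		return 0;
	}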
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "musb_core.h" - -#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) - -#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */ - -struct tusb_omap_dma_ch { - struct musb *musb; - void __iomem *tbase; - unsigned long phys_offset; - int epnum; - u8 tx; - struct musb_hw_ep *hw_ep; - - int ch; - s8 dmareq; - s8 sync_dev; - - struct tusb_omap_dma *tusb_dma; - - void __iomem *dma_addr; - - u32 len; - u16 packet_sz; - u16 transfer_packet_sz; - u32 transfer_len; - u32 completed_len; -}; - -struct tusb_omap_dma { - struct dma_controller controller; - struct musb *musb; - void __iomem *tbase; - - int ch; - s8 dmareq; - s8 sync_dev; - unsigned multichannel:1; -}; - -static int tusb_omap_dma_start(struct dma_controller *c) -{ - struct tusb_omap_dma *tusb_dma; - - tusb_dma = container_of(c, struct tusb_omap_dma, controller); - - /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ - - return 0; -} - -static int tusb_omap_dma_stop(struct dma_controller *c) -{ - struct tusb_omap_dma *tusb_dma; - - tusb_dma = container_of(c, struct tusb_omap_dma, controller); - - /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ - - return 0; -} - -/* - * Allocate dmareq0 to the current channel unless it's already taken - */ -static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) -{ - u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); - - if (reg != 0) { - DBG(3, "ep%i dmareq0 is busy for ep%i\n", - chdat->epnum, reg & 0xf); - return -EAGAIN; - } - - if (chdat->tx) - reg = (1 << 4) | chdat->epnum; - else - reg = chdat->epnum; - - musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); - - return 0; -} - -static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat) -{ - u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); - - if ((reg & 0xf) != chdat->epnum) { - printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n", - chdat->epnum, reg & 0xf); - return; - } - musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0); -} - -/* - * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in - * musb_gadget.c. - */ -static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) -{ - struct dma_channel *channel = (struct dma_channel *)data; - struct tusb_omap_dma_ch *chdat = to_chdat(channel); - struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; - struct musb *musb = chdat->musb; - struct musb_hw_ep *hw_ep = chdat->hw_ep; - void __iomem *ep_conf = hw_ep->conf; - void __iomem *mbase = musb->mregs; - unsigned long remaining, flags, pio; - int ch; - - spin_lock_irqsave(&musb->lock, flags); - - if (tusb_dma->multichannel) - ch = chdat->ch; - else - ch = tusb_dma->ch; - - if (ch_status != OMAP_DMA_BLOCK_IRQ) - printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status); - - DBG(3, "ep%i %s dma callback ch: %i status: %x\n", - chdat->epnum, chdat->tx ? "tx" : "rx", - ch, ch_status); - - if (chdat->tx) - remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); - else - remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); - - remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining); - - /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */ - if (unlikely(remaining > chdat->transfer_len)) { - DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", - chdat->tx ? 
"tx" : "rx", chdat->ch, - remaining); - remaining = 0; - } - - channel->actual_len = chdat->transfer_len - remaining; - pio = chdat->len - channel->actual_len; - - DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len); - - /* Transfer remaining 1 - 31 bytes */ - if (pio > 0 && pio < 32) { - u8 *buf; - - DBG(3, "Using PIO for remaining %lu bytes\n", pio); - buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; - if (chdat->tx) { - dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), - chdat->transfer_len, DMA_TO_DEVICE); - musb_write_fifo(hw_ep, pio, buf); - } else { - musb_read_fifo(hw_ep, pio, buf); - dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), - chdat->transfer_len, DMA_FROM_DEVICE); - } - channel->actual_len += pio; - } - - if (!tusb_dma->multichannel) - tusb_omap_free_shared_dmareq(chdat); - - channel->status = MUSB_DMA_STATUS_FREE; - - /* Handle only RX callbacks here. TX callbacks must be handled based - * on the TUSB DMA status interrupt. - * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback - * interrupt for RX and TX. - */ - if (!chdat->tx) - musb_dma_completion(musb, chdat->epnum, chdat->tx); - - /* We must terminate short tx transfers manually by setting TXPKTRDY. - * REVISIT: This same problem may occur with other MUSB dma as well. - * Easy to test with g_ether by pinging the MUSB board with ping -s54. - */ - if ((chdat->transfer_len < chdat->packet_sz) - || (chdat->transfer_len % chdat->packet_sz != 0)) { - u16 csr; - - if (chdat->tx) { - DBG(3, "terminating short tx packet\n"); - musb_ep_select(mbase, chdat->epnum); - csr = musb_readw(hw_ep->regs, MUSB_TXCSR); - csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY - | MUSB_TXCSR_P_WZC_BITS; - musb_writew(hw_ep->regs, MUSB_TXCSR, csr); - } - } - - spin_unlock_irqrestore(&musb->lock, flags); -} - -static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, - u8 rndis_mode, dma_addr_t dma_addr, u32 len) -{ - struct tusb_omap_dma_ch *chdat = to_chdat(channel); - struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; - struct musb *musb = chdat->musb; - struct musb_hw_ep *hw_ep = chdat->hw_ep; - void __iomem *mbase = musb->mregs; - void __iomem *ep_conf = hw_ep->conf; - dma_addr_t fifo = hw_ep->fifo_sync; - struct omap_dma_channel_params dma_params; - u32 dma_remaining; - int src_burst, dst_burst; - u16 csr; - int ch; - s8 dmareq; - s8 sync_dev; - - if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz)) - return false; - - /* - * HW issue #10: Async dma will eventually corrupt the XFR_SIZE - * register which will cause missed DMA interrupt. We could try to - * use a timer for the callback, but it is unsafe as the XFR_SIZE - * register is corrupt, and we won't know if the DMA worked. - */ - if (dma_addr & 0x2) - return false; - - /* - * Because of HW issue #10, it seems like mixing sync DMA and async - * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before - * using the channel for DMA. - */ - if (chdat->tx) - dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); - else - dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); - - dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); - if (dma_remaining) { - DBG(2, "Busy %s dma ch%i, not using: %08x\n", - chdat->tx ? 
"tx" : "rx", chdat->ch, - dma_remaining); - return false; - } - - chdat->transfer_len = len & ~0x1f; - - if (len < packet_sz) - chdat->transfer_packet_sz = chdat->transfer_len; - else - chdat->transfer_packet_sz = packet_sz; - - if (tusb_dma->multichannel) { - ch = chdat->ch; - dmareq = chdat->dmareq; - sync_dev = chdat->sync_dev; - } else { - if (tusb_omap_use_shared_dmareq(chdat) != 0) { - DBG(3, "could not get dma for ep%i\n", chdat->epnum); - return false; - } - if (tusb_dma->ch < 0) { - /* REVISIT: This should get blocked earlier, happens - * with MSC ErrorRecoveryTest - */ - WARN_ON(1); - return false; - } - - ch = tusb_dma->ch; - dmareq = tusb_dma->dmareq; - sync_dev = tusb_dma->sync_dev; - omap_set_dma_callback(ch, tusb_omap_dma_cb, channel); - } - - chdat->packet_sz = packet_sz; - chdat->len = len; - channel->actual_len = 0; - chdat->dma_addr = (void __iomem *)dma_addr; - channel->status = MUSB_DMA_STATUS_BUSY; - - /* Since we're recycling dma areas, we need to clean or invalidate */ - if (chdat->tx) - dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE); - else - dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE); - - /* Use 16-bit transfer if dma_addr is not 32-bit aligned */ - if ((dma_addr & 0x3) == 0) { - dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; - dma_params.elem_count = 8; /* Elements in frame */ - } else { - dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; - dma_params.elem_count = 16; /* Elements in frame */ - fifo = hw_ep->fifo_async; - } - - dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ - - DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", - chdat->epnum, chdat->tx ? "tx" : "rx", - ch, dma_addr, chdat->transfer_len, len, - chdat->transfer_packet_sz, packet_sz); - - /* - * Prepare omap DMA for transfer - */ - if (chdat->tx) { - dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; - dma_params.src_start = (unsigned long)dma_addr; - dma_params.src_ei = 0; - dma_params.src_fi = 0; - - dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX; - dma_params.dst_start = (unsigned long)fifo; - dma_params.dst_ei = 1; - dma_params.dst_fi = -31; /* Loop 32 byte window */ - - dma_params.trigger = sync_dev; - dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; - dma_params.src_or_dst_synch = 0; /* Dest sync */ - - src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */ - dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */ - } else { - dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; - dma_params.src_start = (unsigned long)fifo; - dma_params.src_ei = 1; - dma_params.src_fi = -31; /* Loop 32 byte window */ - - dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; - dma_params.dst_start = (unsigned long)dma_addr; - dma_params.dst_ei = 0; - dma_params.dst_fi = 0; - - dma_params.trigger = sync_dev; - dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; - dma_params.src_or_dst_synch = 1; /* Source sync */ - - src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */ - dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ - } - - DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", - chdat->epnum, chdat->tx ? "tx" : "rx", - (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, - ((dma_addr & 0x3) == 0) ? 
"sync" : "async", - dma_params.src_start, dma_params.dst_start); - - omap_set_dma_params(ch, &dma_params); - omap_set_dma_src_burst_mode(ch, src_burst); - omap_set_dma_dest_burst_mode(ch, dst_burst); - omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED); - - /* - * Prepare MUSB for DMA transfer - */ - if (chdat->tx) { - musb_ep_select(mbase, chdat->epnum); - csr = musb_readw(hw_ep->regs, MUSB_TXCSR); - csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB - | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); - csr &= ~MUSB_TXCSR_P_UNDERRUN; - musb_writew(hw_ep->regs, MUSB_TXCSR, csr); - } else { - musb_ep_select(mbase, chdat->epnum); - csr = musb_readw(hw_ep->regs, MUSB_RXCSR); - csr |= MUSB_RXCSR_DMAENAB; - csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); - musb_writew(hw_ep->regs, MUSB_RXCSR, - csr | MUSB_RXCSR_P_WZC_BITS); - } - - /* - * Start DMA transfer - */ - omap_start_dma(ch); - - if (chdat->tx) { - /* Send transfer_packet_sz packets at a time */ - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, - chdat->transfer_packet_sz); - - musb_writel(ep_conf, TUSB_EP_TX_OFFSET, - TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); - } else { - /* Receive transfer_packet_sz packets at a time */ - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, - chdat->transfer_packet_sz << 16); - - musb_writel(ep_conf, TUSB_EP_RX_OFFSET, - TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); - } - - return true; -} - -static int tusb_omap_dma_abort(struct dma_channel *channel) -{ - struct tusb_omap_dma_ch *chdat = to_chdat(channel); - struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; - - if (!tusb_dma->multichannel) { - if (tusb_dma->ch >= 0) { - omap_stop_dma(tusb_dma->ch); - omap_free_dma(tusb_dma->ch); - tusb_dma->ch = -1; - } - - tusb_dma->dmareq = -1; - tusb_dma->sync_dev = -1; - } - - channel->status = MUSB_DMA_STATUS_FREE; - - return 0; -} - -static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat) -{ - u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); - int i, dmareq_nr = -1; - - const int sync_dev[6] = { - OMAP24XX_DMA_EXT_DMAREQ0, - OMAP24XX_DMA_EXT_DMAREQ1, - OMAP242X_DMA_EXT_DMAREQ2, - OMAP242X_DMA_EXT_DMAREQ3, - OMAP242X_DMA_EXT_DMAREQ4, - OMAP242X_DMA_EXT_DMAREQ5, - }; - - for (i = 0; i < MAX_DMAREQ; i++) { - int cur = (reg & (0xf << (i * 5))) >> (i * 5); - if (cur == 0) { - dmareq_nr = i; - break; - } - } - - if (dmareq_nr == -1) - return -EAGAIN; - - reg |= (chdat->epnum << (dmareq_nr * 5)); - if (chdat->tx) - reg |= ((1 << 4) << (dmareq_nr * 5)); - musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); - - chdat->dmareq = dmareq_nr; - chdat->sync_dev = sync_dev[chdat->dmareq]; - - return 0; -} - -static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat) -{ - u32 reg; - - if (!chdat || chdat->dmareq < 0) - return; - - reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); - reg &= ~(0x1f << (chdat->dmareq * 5)); - musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); - - chdat->dmareq = -1; - chdat->sync_dev = -1; -} - -static struct dma_channel *dma_channel_pool[MAX_DMAREQ]; - -static struct dma_channel * -tusb_omap_dma_allocate(struct dma_controller *c, - struct musb_hw_ep *hw_ep, - u8 tx) -{ - int ret, i; - const char *dev_name; - struct tusb_omap_dma *tusb_dma; - struct musb *musb; - void __iomem *tbase; - struct dma_channel *channel = NULL; - struct tusb_omap_dma_ch *chdat = NULL; - u32 reg; - - tusb_dma = container_of(c, struct tusb_omap_dma, controller); - musb = tusb_dma->musb; - tbase = musb->ctrl_base; - - reg = musb_readl(tbase, TUSB_DMA_INT_MASK); - if (tx) 
- reg &= ~(1 << hw_ep->epnum); - else - reg &= ~(1 << (hw_ep->epnum + 15)); - musb_writel(tbase, TUSB_DMA_INT_MASK, reg); - - /* REVISIT: Why does dmareq5 not work? */ - if (hw_ep->epnum == 0) { - DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); - return NULL; - } - - for (i = 0; i < MAX_DMAREQ; i++) { - struct dma_channel *ch = dma_channel_pool[i]; - if (ch->status == MUSB_DMA_STATUS_UNKNOWN) { - ch->status = MUSB_DMA_STATUS_FREE; - channel = ch; - chdat = ch->private_data; - break; - } - } - - if (!channel) - return NULL; - - if (tx) { - chdat->tx = 1; - dev_name = "TUSB transmit"; - } else { - chdat->tx = 0; - dev_name = "TUSB receive"; - } - - chdat->musb = tusb_dma->musb; - chdat->tbase = tusb_dma->tbase; - chdat->hw_ep = hw_ep; - chdat->epnum = hw_ep->epnum; - chdat->dmareq = -1; - chdat->completed_len = 0; - chdat->tusb_dma = tusb_dma; - - channel->max_len = 0x7fffffff; - channel->desired_mode = 0; - channel->actual_len = 0; - - if (tusb_dma->multichannel) { - ret = tusb_omap_dma_allocate_dmareq(chdat); - if (ret != 0) - goto free_dmareq; - - ret = omap_request_dma(chdat->sync_dev, dev_name, - tusb_omap_dma_cb, channel, &chdat->ch); - if (ret != 0) - goto free_dmareq; - } else if (tusb_dma->ch == -1) { - tusb_dma->dmareq = 0; - tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0; - - /* Callback data gets set later in the shared dmareq case */ - ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared", - tusb_omap_dma_cb, NULL, &tusb_dma->ch); - if (ret != 0) - goto free_dmareq; - - chdat->dmareq = -1; - chdat->ch = -1; - } - - DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", - chdat->epnum, - chdat->tx ? "tx" : "rx", - chdat->ch >= 0 ? "dedicated" : "shared", - chdat->ch >= 0 ? chdat->ch : tusb_dma->ch, - chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq, - chdat->sync_dev >= 0 ? 
chdat->sync_dev : tusb_dma->sync_dev); - - return channel; - -free_dmareq: - tusb_omap_dma_free_dmareq(chdat); - - DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum); - channel->status = MUSB_DMA_STATUS_UNKNOWN; - - return NULL; -} - -static void tusb_omap_dma_release(struct dma_channel *channel) -{ - struct tusb_omap_dma_ch *chdat = to_chdat(channel); - struct musb *musb = chdat->musb; - void __iomem *tbase = musb->ctrl_base; - u32 reg; - - DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch); - - reg = musb_readl(tbase, TUSB_DMA_INT_MASK); - if (chdat->tx) - reg |= (1 << chdat->epnum); - else - reg |= (1 << (chdat->epnum + 15)); - musb_writel(tbase, TUSB_DMA_INT_MASK, reg); - - reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR); - if (chdat->tx) - reg |= (1 << chdat->epnum); - else - reg |= (1 << (chdat->epnum + 15)); - musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg); - - channel->status = MUSB_DMA_STATUS_UNKNOWN; - - if (chdat->ch >= 0) { - omap_stop_dma(chdat->ch); - omap_free_dma(chdat->ch); - chdat->ch = -1; - } - - if (chdat->dmareq >= 0) - tusb_omap_dma_free_dmareq(chdat); - - channel = NULL; -} - -void dma_controller_destroy(struct dma_controller *c) -{ - struct tusb_omap_dma *tusb_dma; - int i; - - tusb_dma = container_of(c, struct tusb_omap_dma, controller); - for (i = 0; i < MAX_DMAREQ; i++) { - struct dma_channel *ch = dma_channel_pool[i]; - if (ch) { - kfree(ch->private_data); - kfree(ch); - } - } - - if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0) - omap_free_dma(tusb_dma->ch); - - kfree(tusb_dma); -} - -struct dma_controller *__init -dma_controller_create(struct musb *musb, void __iomem *base) -{ - void __iomem *tbase = musb->ctrl_base; - struct tusb_omap_dma *tusb_dma; - int i; - - /* REVISIT: Get dmareq lines used from board-*.c */ - - musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff); - musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0); - - musb_writel(tbase, TUSB_DMA_REQ_CONF, - TUSB_DMA_REQ_CONF_BURST_SIZE(2) - | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) - | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); - - tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL); - if (!tusb_dma) - goto cleanup; - - tusb_dma->musb = musb; - tusb_dma->tbase = musb->ctrl_base; - - tusb_dma->ch = -1; - tusb_dma->dmareq = -1; - tusb_dma->sync_dev = -1; - - tusb_dma->controller.start = tusb_omap_dma_start; - tusb_dma->controller.stop = tusb_omap_dma_stop; - tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate; - tusb_dma->controller.channel_release = tusb_omap_dma_release; - tusb_dma->controller.channel_program = tusb_omap_dma_program; - tusb_dma->controller.channel_abort = tusb_omap_dma_abort; - - if (tusb_get_revision(musb) >= TUSB_REV_30) - tusb_dma->multichannel = 1; - - for (i = 0; i < MAX_DMAREQ; i++) { - struct dma_channel *ch; - struct tusb_omap_dma_ch *chdat; - - ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL); - if (!ch) - goto cleanup; - - dma_channel_pool[i] = ch; - - chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL); - if (!chdat) - goto cleanup; - - ch->status = MUSB_DMA_STATUS_UNKNOWN; - ch->private_data = chdat; - } - - return &tusb_dma->controller; - -cleanup: - dma_controller_destroy(&tusb_dma->controller); - - return NULL; -} diff --git a/trunk/drivers/usb/serial/Kconfig b/trunk/drivers/usb/serial/Kconfig index 70338f4ec918..8878c1767fc8 100644 --- a/trunk/drivers/usb/serial/Kconfig +++ b/trunk/drivers/usb/serial/Kconfig @@ -499,10 +499,9 @@ config USB_SERIAL_SAFE_PADDED config USB_SERIAL_SIERRAWIRELESS tristate "USB Sierra Wireless Driver" help 
- Say M here if you want to use Sierra Wireless devices. - - Many deviecs have a feature known as TRU-Install, for those devices - to work properly the USB Storage Sierra feature must be enabled. + Say M here if you want to use a Sierra Wireless device (if + using an PC 5220 or AC580 please use the Airprime driver + instead). To compile this driver as a module, choose M here: the module will be called sierra. diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index 984f6eff4c47..838717250145 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -563,7 +563,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, @@ -638,7 +637,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, @@ -648,10 +646,6 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID), - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID), - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, { }, /* Optional parameter entry */ diff --git a/trunk/drivers/usb/serial/ftdi_sio.h b/trunk/drivers/usb/serial/ftdi_sio.h index 382265bba969..a577ea44dcf9 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.h +++ b/trunk/drivers/usb/serial/ftdi_sio.h @@ -524,9 +524,7 @@ #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ -#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ -#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ /* * Definitions for ID TECH (www.idt-net.com) devices @@ -817,11 +815,6 @@ #define OLIMEX_VID 0x15BA #define OLIMEX_ARM_USB_OCD_PID 0x0003 -/* Luminary Micro Stellaris Boards, VID = FTDI_VID */ -/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */ -#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8 -#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9 - /* www.elsterelectricity.com Elster Unicom III Optical Probe */ #define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c index e143198aeb02..e4eca95f2b0f 100644 --- a/trunk/drivers/usb/serial/option.c +++ b/trunk/drivers/usb/serial/option.c @@ -186,23 +186,6 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po #define BANDRICH_VENDOR_ID 0x1A8D #define BANDRICH_PRODUCT_C100_1 0x1002 #define BANDRICH_PRODUCT_C100_2 0x1003 -#define 
BANDRICH_PRODUCT_1004 0x1004 -#define BANDRICH_PRODUCT_1005 0x1005 -#define BANDRICH_PRODUCT_1006 0x1006 -#define BANDRICH_PRODUCT_1007 0x1007 -#define BANDRICH_PRODUCT_1008 0x1008 -#define BANDRICH_PRODUCT_1009 0x1009 -#define BANDRICH_PRODUCT_100A 0x100a - -#define BANDRICH_PRODUCT_100B 0x100b -#define BANDRICH_PRODUCT_100C 0x100c -#define BANDRICH_PRODUCT_100D 0x100d -#define BANDRICH_PRODUCT_100E 0x100e - -#define BANDRICH_PRODUCT_100F 0x100f -#define BANDRICH_PRODUCT_1010 0x1010 -#define BANDRICH_PRODUCT_1011 0x1011 -#define BANDRICH_PRODUCT_1012 0x1012 #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_9508 0x0800 @@ -214,10 +197,6 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 -/* ZTE PRODUCTS */ -#define ZTE_VENDOR_ID 0x19d2 -#define ZTE_PRODUCT_MF628 0x0015 - static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -323,28 +302,12 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) }, - { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, - { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -383,7 +346,11 @@ static struct usb_serial_driver option_1port_device = { .read_int_callback = option_instat_callback, }; +#ifdef CONFIG_USB_DEBUG static int debug; +#else +#define debug 0 +#endif /* per port private data */ @@ -987,5 +954,8 @@ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); +#ifdef CONFIG_USB_DEBUG module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages"); +#endif + diff --git a/trunk/drivers/usb/serial/pl2303.c b/trunk/drivers/usb/serial/pl2303.c index 1ede1441cb1b..2c9c446ad625 100644 --- a/trunk/drivers/usb/serial/pl2303.c +++ b/trunk/drivers/usb/serial/pl2303.c @@ -90,6 +90,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, { USB_DEVICE(COREGA_VENDOR_ID, 
COREGA_PRODUCT_ID) }, + { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) }, { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/trunk/drivers/usb/serial/pl2303.h b/trunk/drivers/usb/serial/pl2303.h index a3bd039c78e9..6ac3bbcf7a22 100644 --- a/trunk/drivers/usb/serial/pl2303.h +++ b/trunk/drivers/usb/serial/pl2303.h @@ -107,6 +107,10 @@ #define COREGA_VENDOR_ID 0x07aa #define COREGA_PRODUCT_ID 0x002a +/* HL HL-340 (ID: 4348:5523) */ +#define HL340_VENDOR_ID 0x4348 +#define HL340_PRODUCT_ID 0x5523 + /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ #define YCCABLE_VENDOR_ID 0x05ad #define YCCABLE_PRODUCT_ID 0x0fba diff --git a/trunk/drivers/usb/serial/sierra.c b/trunk/drivers/usb/serial/sierra.c index 706033753adb..2f6f1523ec56 100644 --- a/trunk/drivers/usb/serial/sierra.c +++ b/trunk/drivers/usb/serial/sierra.c @@ -14,7 +14,7 @@ Whom based his on the Keyspan driver by Hugh Blemings */ -#define DRIVER_VERSION "v.1.2.13a" +#define DRIVER_VERSION "v.1.2.9c" #define DRIVER_AUTHOR "Kevin Lloyd " #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" @@ -31,7 +31,6 @@ #define SWIMS_USB_REQUEST_SetPower 0x00 #define SWIMS_USB_REQUEST_SetNmea 0x07 #define SWIMS_USB_REQUEST_SetMode 0x0B -#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A #define SWIMS_SET_MODE_Modem 0x0001 /* per port private data */ @@ -41,11 +40,18 @@ static int debug; static int nmea; +static int truinstall = 1; + +enum devicetype { + DEVICE_3_PORT = 0, + DEVICE_1_PORT = 1, + DEVICE_INSTALLER = 2, +}; static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) { int result; - dev_dbg(&udev->dev, "%s", __func__); + dev_dbg(&udev->dev, "%s", "SET POWER STATE\n"); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetPower, /* __u8 request */ USB_TYPE_VENDOR, /* __u8 request type */ @@ -57,10 +63,25 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) return result; } +static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) +{ + int result; + dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n"); + result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + SWIMS_USB_REQUEST_SetMode, /* __u8 request */ + USB_TYPE_VENDOR, /* __u8 request type */ + eSWocMode, /* __u16 value */ + 0x0000, /* __u16 index */ + NULL, /* void *data */ + 0, /* __u16 size */ + USB_CTRL_SET_TIMEOUT); /* int timeout */ + return result; +} + static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) { int result; - dev_dbg(&udev->dev, "%s", __func__); + dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n"); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ USB_TYPE_VENDOR, /* __u8 request type */ @@ -76,7 +97,6 @@ static int sierra_calc_num_ports(struct usb_serial *serial) { int result; int *num_ports = usb_get_serial_data(serial); - dev_dbg(&serial->dev->dev, "%s", __func__); result = *num_ports; @@ -90,23 +110,22 @@ static int sierra_calc_num_ports(struct usb_serial *serial) static int sierra_calc_interface(struct usb_serial *serial) { - int interface; - struct usb_interface *p_interface; - struct usb_host_interface *p_host_interface; - dev_dbg(&serial->dev->dev, "%s", __func__); + int interface; + struct usb_interface *p_interface; + struct usb_host_interface *p_host_interface; - /* Get the interface structure pointer from the serial struct */ - p_interface = serial->interface; + /* Get the interface structure pointer from the serial struct */ + p_interface = serial->interface; - /* 
Get a pointer to the host interface structure */ - p_host_interface = p_interface->cur_altsetting; + /* Get a pointer to the host interface structure */ + p_host_interface = p_interface->cur_altsetting; - /* read the interface descriptor for this active altsetting - * to find out the interface number we are on - */ - interface = p_host_interface->desc.bInterfaceNumber; + /* read the interface descriptor for this active altsetting + * to find out the interface number we are on + */ + interface = p_host_interface->desc.bInterfaceNumber; - return interface; + return interface; } static int sierra_probe(struct usb_serial *serial, @@ -116,40 +135,43 @@ static int sierra_probe(struct usb_serial *serial, struct usb_device *udev; int *num_ports; u8 ifnum; - u8 numendpoints; - - dev_dbg(&serial->dev->dev, "%s", __func__); num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL); if (!num_ports) return -ENOMEM; ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; - numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints; udev = serial->dev; - /* Figure out the interface number from the serial structure */ - ifnum = sierra_calc_interface(serial); + /* Figure out the interface number from the serial structure */ + ifnum = sierra_calc_interface(serial); - /* - * If this interface supports more than 1 alternate - * select the 2nd one - */ - if (serial->interface->num_altsetting == 2) { - dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n", - ifnum); - /* We know the alternate setting is 1 for the MC8785 */ - usb_set_interface(udev, ifnum, 1); - } + /* + * If this interface supports more than 1 alternate + * select the 2nd one + */ + if (serial->interface->num_altsetting == 2) { + dev_dbg(&udev->dev, + "Selecting alt setting for interface %d\n", + ifnum); - /* Dummy interface present on some SKUs should be ignored */ - if (ifnum == 0x99) - *num_ports = 0; - else if (numendpoints <= 3) + /* We know the alternate setting is 1 for the MC8785 */ + usb_set_interface(udev, ifnum, 1); + } + + /* Check if in installer mode */ + if (truinstall && id->driver_info == DEVICE_INSTALLER) { + dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n"); + result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); + /* Don't bind to the device when in installer mode */ + kfree(num_ports); + return -EIO; + } else if (id->driver_info == DEVICE_1_PORT) *num_ports = 1; + else if (ifnum == 0x99) + *num_ports = 0; else - *num_ports = (numendpoints-1)/2; - + *num_ports = 3; /* * save off our num_ports info so that we can use it in the * calc_num_ports callback @@ -165,50 +187,40 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ - { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ - /* Sierra Wireless C597 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, - /* Sierra Wireless Device */ - { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, - { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */ { 
USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ - { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */ + { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */ { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ - { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ - { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ - { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ - { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */ - { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */ - { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */ + { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ + { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ + { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/ { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ - { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ - { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ - /* Sierra Wireless C885 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, - /* Sierra Wireless Device */ - { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, - /* Sierra Wireless Device */ - { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, - - { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ - { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ + { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ + { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ + + { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */ + { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */ + + { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ + { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ + { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER}, { } }; MODULE_DEVICE_TABLE(usb, id_table); @@ -256,19 +268,13 @@ static int sierra_send_setup(struct tty_struct *tty, if (portdata->rts_state) val |= 0x02; - /* If composite device then properly report interface */ - if (serial->num_ports == 1) - interface = sierra_calc_interface(serial); - - /* Otherwise the need to do non-composite mapping */ - else { - if (port->bulk_out_endpointAddress == 2) - interface = 0; - else if (port->bulk_out_endpointAddress == 4) - interface = 1; - else if (port->bulk_out_endpointAddress == 5) - interface = 2; - } + /* Determine which port is targeted */ + if (port->bulk_out_endpointAddress == 2) + interface = 0; + else if (port->bulk_out_endpointAddress == 
4) + interface = 1; + else if (port->bulk_out_endpointAddress == 5) + interface = 2; return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), @@ -707,7 +713,7 @@ static void sierra_shutdown(struct usb_serial *serial) static struct usb_serial_driver sierra_device = { .driver = { .owner = THIS_MODULE, - .name = "sierra", + .name = "sierra1", }, .description = "Sierra USB modem", .id_table = id_table, @@ -763,8 +769,14 @@ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); -module_param(nmea, bool, S_IRUGO | S_IWUSR); +module_param(truinstall, bool, 0); +MODULE_PARM_DESC(truinstall, "TRU-Install support"); + +module_param(nmea, bool, 0); MODULE_PARM_DESC(nmea, "NMEA streaming"); +#ifdef CONFIG_USB_DEBUG module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages"); +#endif + diff --git a/trunk/drivers/usb/serial/usb-serial.c b/trunk/drivers/usb/serial/usb-serial.c index b157c48e8b78..8c2d531eedea 100644 --- a/trunk/drivers/usb/serial/usb-serial.c +++ b/trunk/drivers/usb/serial/usb-serial.c @@ -122,6 +122,9 @@ static void return_serial(struct usb_serial *serial) dbg("%s", __func__); + if (serial == NULL) + return; + for (i = 0; i < serial->num_ports; ++i) serial_table[serial->minor + i] = NULL; } @@ -139,8 +142,7 @@ static void destroy_serial(struct kref *kref) serial->type->shutdown(serial); /* return the minor range that this device had */ - if (serial->minor != SERIAL_TTY_NO_MINOR) - return_serial(serial); + return_serial(serial); for (i = 0; i < serial->num_ports; ++i) serial->port[i]->port.count = 0; @@ -573,7 +575,6 @@ static struct usb_serial *create_serial(struct usb_device *dev, serial->interface = interface; kref_init(&serial->kref); mutex_init(&serial->disc_mutex); - serial->minor = SERIAL_TTY_NO_MINOR; return serial; } diff --git a/trunk/drivers/usb/storage/Kconfig b/trunk/drivers/usb/storage/Kconfig index c76034672c18..3d9249632ae1 100644 --- a/trunk/drivers/usb/storage/Kconfig +++ b/trunk/drivers/usb/storage/Kconfig @@ -146,18 +146,6 @@ config USB_STORAGE_KARMA on the resulting scsi device node returns the Karma to normal operation. -config USB_STORAGE_SIERRA - bool "Sierra Wireless TRU-Install Feature Support" - depends on USB_STORAGE - help - Say Y here to include additional code to support Sierra Wireless - products with the TRU-Install feature (e.g., AC597E, AC881U). - - This code switches the Sierra Wireless device from being in - Mass Storage mode to Modem mode. It also has the ability to - support host software upgrades should full Linux support be added - to TRU-Install. 
- config USB_STORAGE_CYPRESS_ATACB bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" depends on USB_STORAGE diff --git a/trunk/drivers/usb/storage/Makefile b/trunk/drivers/usb/storage/Makefile index bc3415b475c9..4c596c766c53 100644 --- a/trunk/drivers/usb/storage/Makefile +++ b/trunk/drivers/usb/storage/Makefile @@ -21,7 +21,6 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o -usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ diff --git a/trunk/drivers/usb/storage/sierra_ms.c b/trunk/drivers/usb/storage/sierra_ms.c deleted file mode 100644 index 4359a2cb42df..000000000000 --- a/trunk/drivers/usb/storage/sierra_ms.c +++ /dev/null @@ -1,207 +0,0 @@ -#include -#include -#include -#include -#include - -#include "usb.h" -#include "transport.h" -#include "protocol.h" -#include "scsiglue.h" -#include "sierra_ms.h" -#include "debug.h" - -#define SWIMS_USB_REQUEST_SetSwocMode 0x0B -#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A -#define SWIMS_USB_INDEX_SetMode 0x0000 -#define SWIMS_SET_MODE_Modem 0x0001 - -#define TRU_NORMAL 0x01 -#define TRU_FORCE_MS 0x02 -#define TRU_FORCE_MODEM 0x03 - -static unsigned int swi_tru_install = 1; -module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def)," - " 2=Force CD-Rom, 3=Force Modem)"); - -struct swoc_info { - __u8 rev; - __u8 reserved[8]; - __u16 LinuxSKU; - __u16 LinuxVer; - __u8 reserved2[47]; -} __attribute__((__packed__)); - -static bool containsFullLinuxPackage(struct swoc_info *swocInfo) -{ - if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) || - (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF)) - return true; - else - return false; -} - -static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) -{ - int result; - US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n"); - result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), - SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */ - USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */ - eSWocMode, /* __u16 value */ - 0x0000, /* __u16 index */ - NULL, /* void *data */ - 0, /* __u16 size */ - USB_CTRL_SET_TIMEOUT); /* int timeout */ - return result; -} - - -static int sierra_get_swoc_info(struct usb_device *udev, - struct swoc_info *swocInfo) -{ - int result; - - US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n"); - - result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */ - USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */ - 0, /* __u16 value */ - 0, /* __u16 index */ - (void *) swocInfo, /* void *data */ - sizeof(struct swoc_info), /* __u16 size */ - USB_CTRL_SET_TIMEOUT); /* int timeout */ - - swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU); - swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer); - return result; -} - -static void debug_swoc(struct swoc_info *swocInfo) -{ - US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev); - US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU); - US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer); -} - - -static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct swoc_info *swocInfo; - struct 
usb_interface *intf = to_usb_interface(dev); - struct usb_device *udev = interface_to_usbdev(intf); - int result; - if (swi_tru_install == TRU_FORCE_MS) { - result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n"); - } else { - swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL); - if (!swocInfo) { - US_DEBUGP("SWIMS: Allocation failure\n"); - snprintf(buf, PAGE_SIZE, "Error\n"); - return -ENOMEM; - } - result = sierra_get_swoc_info(udev, swocInfo); - if (result < 0) { - US_DEBUGP("SWIMS: failed SWoC query\n"); - kfree(swocInfo); - snprintf(buf, PAGE_SIZE, "Error\n"); - return -EIO; - } - debug_swoc(swocInfo); - result = snprintf(buf, PAGE_SIZE, - "REV=%02d SKU=%04X VER=%04X\n", - swocInfo->rev, - swocInfo->LinuxSKU, - swocInfo->LinuxVer); - kfree(swocInfo); - } - return result; -} -static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); - -int sierra_ms_init(struct us_data *us) -{ - int result, retries; - signed long delay_t; - struct swoc_info *swocInfo; - struct usb_device *udev; - struct Scsi_Host *sh; - struct scsi_device *sd; - - delay_t = 2; - retries = 3; - result = 0; - udev = us->pusb_dev; - - sh = us_to_host(us); - sd = scsi_get_host_dev(sh); - - US_DEBUGP("SWIMS: sierra_ms_init called\n"); - - /* Force Modem mode */ - if (swi_tru_install == TRU_FORCE_MODEM) { - US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n"); - result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); - if (result < 0) - US_DEBUGP("SWIMS: Failed to switch to modem mode.\n"); - return -EIO; - } - /* Force Mass Storage mode (keep CD-Rom) */ - else if (swi_tru_install == TRU_FORCE_MS) { - US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n"); - goto complete; - } - /* Normal TRU-Install Logic */ - else { - US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n"); - - swocInfo = kmalloc(sizeof(struct swoc_info), - GFP_KERNEL); - if (!swocInfo) { - US_DEBUGP("SWIMS: %s", "Allocation failure\n"); - return -ENOMEM; - } - - retries = 3; - do { - retries--; - result = sierra_get_swoc_info(udev, swocInfo); - if (result < 0) { - US_DEBUGP("SWIMS: %s", "Failed SWoC query\n"); - schedule_timeout_uninterruptible(2*HZ); - } - } while (retries && result < 0); - - if (result < 0) { - US_DEBUGP("SWIMS: %s", - "Completely failed SWoC query\n"); - kfree(swocInfo); - return -EIO; - } - - debug_swoc(swocInfo); - - /* If there is not Linux software on the TRU-Install device - * then switch to modem mode - */ - if (!containsFullLinuxPackage(swocInfo)) { - US_DEBUGP("SWIMS: %s", - "Switching to Modem Mode\n"); - result = sierra_set_ms_mode(udev, - SWIMS_SET_MODE_Modem); - if (result < 0) - US_DEBUGP("SWIMS: Failed to switch modem\n"); - kfree(swocInfo); - return -EIO; - } - kfree(swocInfo); - } -complete: - result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst); - - return USB_STOR_TRANSPORT_GOOD; -} - diff --git a/trunk/drivers/usb/storage/sierra_ms.h b/trunk/drivers/usb/storage/sierra_ms.h deleted file mode 100644 index bb48634ac1fc..000000000000 --- a/trunk/drivers/usb/storage/sierra_ms.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _SIERRA_MS_H_ -#define _SIERRA_MS_H_ -extern int sierra_ms_init(struct us_data *us); -#endif diff --git a/trunk/drivers/usb/storage/transport.c b/trunk/drivers/usb/storage/transport.c index 3523a0bfa0ff..fcbbfdb7b2b0 100644 --- a/trunk/drivers/usb/storage/transport.c +++ b/trunk/drivers/usb/storage/transport.c @@ -1032,21 +1032,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us) /* try to compute the actual residue, based on how much data * was really transferred and 
what the device tells us */ - if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) { - - /* Heuristically detect devices that generate bogus residues - * by seeing what happens with INQUIRY and READ CAPACITY - * commands. - */ - if (bcs->Status == US_BULK_STAT_OK && - scsi_get_resid(srb) == 0 && - ((srb->cmnd[0] == INQUIRY && - transfer_length == 36) || - (srb->cmnd[0] == READ_CAPACITY && - transfer_length == 8))) { - us->fflags |= US_FL_IGNORE_RESIDUE; - - } else { + if (residue) { + if (!(us->fflags & US_FL_IGNORE_RESIDUE)) { residue = min(residue, transfer_length); scsi_set_resid(srb, max(scsi_get_resid(srb), (int) residue)); diff --git a/trunk/drivers/usb/storage/unusual_devs.h b/trunk/drivers/usb/storage/unusual_devs.h index ba412e68d474..7ae69f55aa96 100644 --- a/trunk/drivers/usb/storage/unusual_devs.h +++ b/trunk/drivers/usb/storage/unusual_devs.h @@ -225,13 +225,6 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 ), -/* Reported by Cedric Godin */ -UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, - "Nokia", - "5300", - US_SC_DEVICE, US_PR_DEVICE, NULL, - US_FL_FIX_CAPACITY ), - /* Reported by Olaf Hering from novell bug #105878 */ UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, "SMSC", @@ -363,14 +356,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200, US_FL_FIX_CAPACITY), /* Reported by Emil Larsson */ -UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111, +UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110, "NIKON", "NIKON DSC D80", US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY), /* Reported by Ortwin Glueck */ -UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111, +UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110, "NIKON", "NIKON DSC D40", US_SC_DEVICE, US_PR_DEVICE, NULL, @@ -1192,13 +1185,6 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY ), -/* Reported by Rauch Wolke */ -UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, - "Simple Tech/Datafab", - "CF+SM Reader", - US_SC_DEVICE, US_PR_DEVICE, NULL, - US_FL_IGNORE_RESIDUE ), - /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant * to the USB storage specification in two ways: * - They tell us they are using transport protocol CBI. In reality they @@ -1576,7 +1562,6 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, US_SC_DEVICE, US_PR_DEVICE, NULL, 0), -#ifdef CONFIG_USB_STORAGE_SIERRA /* Reported by Kevin Lloyd * Entry is needed for the initializer function override, * which instructs the device to load as a modem @@ -1585,9 +1570,8 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, "Sierra Wireless", "USB MMC Storage", - US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, - 0), -#endif + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE), /* Reported by Jaco Kroon * The usb-storage module found on the Digitech GNX4 (and supposedly other @@ -1758,15 +1742,6 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY), -/* - * Patch by Jost Diederichs - */ -UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999, - "Motorola Inc.", - "Motorola Phone (RAZRV3xx)", - US_SC_DEVICE, US_PR_DEVICE, NULL, - US_FL_FIX_CAPACITY), - /* * Patch by Constantin Baranov * Report by Andreas Koenecke. 
@@ -1792,13 +1767,6 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), -/* Reported by Andrey Rahmatullin */ -UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, - "iRiver", - "MP3 T10", - US_SC_DEVICE, US_PR_DEVICE, NULL, - US_FL_IGNORE_RESIDUE ), - /* * David Härdeman * The key makes the SCSI stack print confusing (but harmless) messages diff --git a/trunk/drivers/usb/storage/usb.c b/trunk/drivers/usb/storage/usb.c index 73679aa506de..bfea851be985 100644 --- a/trunk/drivers/usb/storage/usb.c +++ b/trunk/drivers/usb/storage/usb.c @@ -102,9 +102,6 @@ #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB #include "cypress_atacb.h" #endif -#ifdef CONFIG_USB_STORAGE_SIERRA -#include "sierra_ms.h" -#endif /* Some informational data */ MODULE_AUTHOR("Matthew Dharm "); diff --git a/trunk/fs/dlm/config.c b/trunk/fs/dlm/config.c index 89d2fb7b991a..c4e7d721bd8d 100644 --- a/trunk/fs/dlm/config.c +++ b/trunk/fs/dlm/config.c @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. -** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -30,16 +30,16 @@ static struct config_group *space_list; static struct config_group *comm_list; -static struct dlm_comm *local_comm; +static struct comm *local_comm; -struct dlm_clusters; -struct dlm_cluster; -struct dlm_spaces; -struct dlm_space; -struct dlm_comms; -struct dlm_comm; -struct dlm_nodes; -struct dlm_node; +struct clusters; +struct cluster; +struct spaces; +struct space; +struct comms; +struct comm; +struct nodes; +struct node; static struct config_group *make_cluster(struct config_group *, const char *); static void drop_cluster(struct config_group *, struct config_item *); @@ -68,22 +68,17 @@ static ssize_t show_node(struct config_item *i, struct configfs_attribute *a, static ssize_t store_node(struct config_item *i, struct configfs_attribute *a, const char *buf, size_t len); -static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf); -static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf, - size_t len); -static ssize_t comm_local_read(struct dlm_comm *cm, char *buf); -static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf, - size_t len); -static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, - size_t len); -static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf); -static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf, - size_t len); -static ssize_t node_weight_read(struct dlm_node *nd, char *buf); -static ssize_t node_weight_write(struct dlm_node *nd, const char *buf, - size_t len); - -struct dlm_cluster { +static ssize_t comm_nodeid_read(struct comm *cm, char *buf); +static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len); +static ssize_t comm_local_read(struct comm *cm, char *buf); +static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len); +static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len); +static ssize_t node_nodeid_read(struct node *nd, char *buf); +static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len); +static ssize_t node_weight_read(struct node *nd, char *buf); +static ssize_t node_weight_write(struct node *nd, const 
char *buf, size_t len); + +struct cluster { struct config_group group; unsigned int cl_tcp_port; unsigned int cl_buffer_size; @@ -114,11 +109,11 @@ enum { struct cluster_attribute { struct configfs_attribute attr; - ssize_t (*show)(struct dlm_cluster *, char *); - ssize_t (*store)(struct dlm_cluster *, const char *, size_t); + ssize_t (*show)(struct cluster *, char *); + ssize_t (*store)(struct cluster *, const char *, size_t); }; -static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field, +static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field, int *info_field, int check_zero, const char *buf, size_t len) { @@ -139,12 +134,12 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field, } #define CLUSTER_ATTR(name, check_zero) \ -static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \ +static ssize_t name##_write(struct cluster *cl, const char *buf, size_t len) \ { \ return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \ check_zero, buf, len); \ } \ -static ssize_t name##_read(struct dlm_cluster *cl, char *buf) \ +static ssize_t name##_read(struct cluster *cl, char *buf) \ { \ return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \ } \ @@ -186,8 +181,8 @@ enum { struct comm_attribute { struct configfs_attribute attr; - ssize_t (*show)(struct dlm_comm *, char *); - ssize_t (*store)(struct dlm_comm *, const char *, size_t); + ssize_t (*show)(struct comm *, char *); + ssize_t (*store)(struct comm *, const char *, size_t); }; static struct comm_attribute comm_attr_nodeid = { @@ -227,8 +222,8 @@ enum { struct node_attribute { struct configfs_attribute attr; - ssize_t (*show)(struct dlm_node *, char *); - ssize_t (*store)(struct dlm_node *, const char *, size_t); + ssize_t (*show)(struct node *, char *); + ssize_t (*store)(struct node *, const char *, size_t); }; static struct node_attribute node_attr_nodeid = { @@ -253,26 +248,26 @@ static struct configfs_attribute *node_attrs[] = { NULL, }; -struct dlm_clusters { +struct clusters { struct configfs_subsystem subsys; }; -struct dlm_spaces { +struct spaces { struct config_group ss_group; }; -struct dlm_space { +struct space { struct config_group group; struct list_head members; struct mutex members_lock; int members_count; }; -struct dlm_comms { +struct comms { struct config_group cs_group; }; -struct dlm_comm { +struct comm { struct config_item item; int nodeid; int local; @@ -280,11 +275,11 @@ struct dlm_comm { struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; }; -struct dlm_nodes { +struct nodes { struct config_group ns_group; }; -struct dlm_node { +struct node { struct config_item item; struct list_head list; /* space->members */ int nodeid; @@ -377,40 +372,38 @@ static struct config_item_type node_type = { .ct_owner = THIS_MODULE, }; -static struct dlm_cluster *to_cluster(struct config_item *i) +static struct cluster *to_cluster(struct config_item *i) { - return i ? container_of(to_config_group(i), struct dlm_cluster, group) : - NULL; + return i ? container_of(to_config_group(i), struct cluster, group):NULL; } -static struct dlm_space *to_space(struct config_item *i) +static struct space *to_space(struct config_item *i) { - return i ? container_of(to_config_group(i), struct dlm_space, group) : - NULL; + return i ? container_of(to_config_group(i), struct space, group) : NULL; } -static struct dlm_comm *to_comm(struct config_item *i) +static struct comm *to_comm(struct config_item *i) { - return i ? container_of(i, struct dlm_comm, item) : NULL; + return i ? 
container_of(i, struct comm, item) : NULL; } -static struct dlm_node *to_node(struct config_item *i) +static struct node *to_node(struct config_item *i) { - return i ? container_of(i, struct dlm_node, item) : NULL; + return i ? container_of(i, struct node, item) : NULL; } static struct config_group *make_cluster(struct config_group *g, const char *name) { - struct dlm_cluster *cl = NULL; - struct dlm_spaces *sps = NULL; - struct dlm_comms *cms = NULL; + struct cluster *cl = NULL; + struct spaces *sps = NULL; + struct comms *cms = NULL; void *gps = NULL; - cl = kzalloc(sizeof(struct dlm_cluster), GFP_KERNEL); + cl = kzalloc(sizeof(struct cluster), GFP_KERNEL); gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); - sps = kzalloc(sizeof(struct dlm_spaces), GFP_KERNEL); - cms = kzalloc(sizeof(struct dlm_comms), GFP_KERNEL); + sps = kzalloc(sizeof(struct spaces), GFP_KERNEL); + cms = kzalloc(sizeof(struct comms), GFP_KERNEL); if (!cl || !gps || !sps || !cms) goto fail; @@ -450,7 +443,7 @@ static struct config_group *make_cluster(struct config_group *g, static void drop_cluster(struct config_group *g, struct config_item *i) { - struct dlm_cluster *cl = to_cluster(i); + struct cluster *cl = to_cluster(i); struct config_item *tmp; int j; @@ -468,20 +461,20 @@ static void drop_cluster(struct config_group *g, struct config_item *i) static void release_cluster(struct config_item *i) { - struct dlm_cluster *cl = to_cluster(i); + struct cluster *cl = to_cluster(i); kfree(cl->group.default_groups); kfree(cl); } static struct config_group *make_space(struct config_group *g, const char *name) { - struct dlm_space *sp = NULL; - struct dlm_nodes *nds = NULL; + struct space *sp = NULL; + struct nodes *nds = NULL; void *gps = NULL; - sp = kzalloc(sizeof(struct dlm_space), GFP_KERNEL); + sp = kzalloc(sizeof(struct space), GFP_KERNEL); gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL); - nds = kzalloc(sizeof(struct dlm_nodes), GFP_KERNEL); + nds = kzalloc(sizeof(struct nodes), GFP_KERNEL); if (!sp || !gps || !nds) goto fail; @@ -507,7 +500,7 @@ static struct config_group *make_space(struct config_group *g, const char *name) static void drop_space(struct config_group *g, struct config_item *i) { - struct dlm_space *sp = to_space(i); + struct space *sp = to_space(i); struct config_item *tmp; int j; @@ -524,16 +517,16 @@ static void drop_space(struct config_group *g, struct config_item *i) static void release_space(struct config_item *i) { - struct dlm_space *sp = to_space(i); + struct space *sp = to_space(i); kfree(sp->group.default_groups); kfree(sp); } static struct config_item *make_comm(struct config_group *g, const char *name) { - struct dlm_comm *cm; + struct comm *cm; - cm = kzalloc(sizeof(struct dlm_comm), GFP_KERNEL); + cm = kzalloc(sizeof(struct comm), GFP_KERNEL); if (!cm) return ERR_PTR(-ENOMEM); @@ -546,7 +539,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name) static void drop_comm(struct config_group *g, struct config_item *i) { - struct dlm_comm *cm = to_comm(i); + struct comm *cm = to_comm(i); if (local_comm == cm) local_comm = NULL; dlm_lowcomms_close(cm->nodeid); @@ -557,16 +550,16 @@ static void drop_comm(struct config_group *g, struct config_item *i) static void release_comm(struct config_item *i) { - struct dlm_comm *cm = to_comm(i); + struct comm *cm = to_comm(i); kfree(cm); } static struct config_item *make_node(struct config_group *g, const char *name) { - struct dlm_space *sp = to_space(g->cg_item.ci_parent); - struct dlm_node *nd; + 
struct space *sp = to_space(g->cg_item.ci_parent); + struct node *nd; - nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL); + nd = kzalloc(sizeof(struct node), GFP_KERNEL); if (!nd) return ERR_PTR(-ENOMEM); @@ -585,8 +578,8 @@ static struct config_item *make_node(struct config_group *g, const char *name) static void drop_node(struct config_group *g, struct config_item *i) { - struct dlm_space *sp = to_space(g->cg_item.ci_parent); - struct dlm_node *nd = to_node(i); + struct space *sp = to_space(g->cg_item.ci_parent); + struct node *nd = to_node(i); mutex_lock(&sp->members_lock); list_del(&nd->list); @@ -598,11 +591,11 @@ static void drop_node(struct config_group *g, struct config_item *i) static void release_node(struct config_item *i) { - struct dlm_node *nd = to_node(i); + struct node *nd = to_node(i); kfree(nd); } -static struct dlm_clusters clusters_root = { +static struct clusters clusters_root = { .subsys = { .su_group = { .cg_item = { @@ -632,7 +625,7 @@ void dlm_config_exit(void) static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a, char *buf) { - struct dlm_cluster *cl = to_cluster(i); + struct cluster *cl = to_cluster(i); struct cluster_attribute *cla = container_of(a, struct cluster_attribute, attr); return cla->show ? cla->show(cl, buf) : 0; @@ -642,7 +635,7 @@ static ssize_t store_cluster(struct config_item *i, struct configfs_attribute *a, const char *buf, size_t len) { - struct dlm_cluster *cl = to_cluster(i); + struct cluster *cl = to_cluster(i); struct cluster_attribute *cla = container_of(a, struct cluster_attribute, attr); return cla->store ? cla->store(cl, buf, len) : -EINVAL; @@ -651,7 +644,7 @@ static ssize_t store_cluster(struct config_item *i, static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, char *buf) { - struct dlm_comm *cm = to_comm(i); + struct comm *cm = to_comm(i); struct comm_attribute *cma = container_of(a, struct comm_attribute, attr); return cma->show ? cma->show(cm, buf) : 0; @@ -660,31 +653,29 @@ static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a, const char *buf, size_t len) { - struct dlm_comm *cm = to_comm(i); + struct comm *cm = to_comm(i); struct comm_attribute *cma = container_of(a, struct comm_attribute, attr); return cma->store ? 
cma->store(cm, buf, len) : -EINVAL; } -static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf) +static ssize_t comm_nodeid_read(struct comm *cm, char *buf) { return sprintf(buf, "%d\n", cm->nodeid); } -static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf, - size_t len) +static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len) { cm->nodeid = simple_strtol(buf, NULL, 0); return len; } -static ssize_t comm_local_read(struct dlm_comm *cm, char *buf) +static ssize_t comm_local_read(struct comm *cm, char *buf) { return sprintf(buf, "%d\n", cm->local); } -static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf, - size_t len) +static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len) { cm->local= simple_strtol(buf, NULL, 0); if (cm->local && !local_comm) @@ -692,7 +683,7 @@ static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf, return len; } -static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len) +static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len) { struct sockaddr_storage *addr; @@ -714,7 +705,7 @@ static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len) static ssize_t show_node(struct config_item *i, struct configfs_attribute *a, char *buf) { - struct dlm_node *nd = to_node(i); + struct node *nd = to_node(i); struct node_attribute *nda = container_of(a, struct node_attribute, attr); return nda->show ? nda->show(nd, buf) : 0; @@ -723,31 +714,29 @@ static ssize_t show_node(struct config_item *i, struct configfs_attribute *a, static ssize_t store_node(struct config_item *i, struct configfs_attribute *a, const char *buf, size_t len) { - struct dlm_node *nd = to_node(i); + struct node *nd = to_node(i); struct node_attribute *nda = container_of(a, struct node_attribute, attr); return nda->store ? 
nda->store(nd, buf, len) : -EINVAL; } -static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf) +static ssize_t node_nodeid_read(struct node *nd, char *buf) { return sprintf(buf, "%d\n", nd->nodeid); } -static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf, - size_t len) +static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len) { nd->nodeid = simple_strtol(buf, NULL, 0); return len; } -static ssize_t node_weight_read(struct dlm_node *nd, char *buf) +static ssize_t node_weight_read(struct node *nd, char *buf) { return sprintf(buf, "%d\n", nd->weight); } -static ssize_t node_weight_write(struct dlm_node *nd, const char *buf, - size_t len) +static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len) { nd->weight = simple_strtol(buf, NULL, 0); return len; @@ -757,7 +746,7 @@ static ssize_t node_weight_write(struct dlm_node *nd, const char *buf, * Functions for the dlm to get the info that's been configured */ -static struct dlm_space *get_space(char *name) +static struct space *get_space(char *name) { struct config_item *i; @@ -771,15 +760,15 @@ static struct dlm_space *get_space(char *name) return to_space(i); } -static void put_space(struct dlm_space *sp) +static void put_space(struct space *sp) { config_item_put(&sp->group.cg_item); } -static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr) +static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr) { struct config_item *i; - struct dlm_comm *cm = NULL; + struct comm *cm = NULL; int found = 0; if (!comm_list) @@ -812,7 +801,7 @@ static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr) return cm; } -static void put_comm(struct dlm_comm *cm) +static void put_comm(struct comm *cm) { config_item_put(&cm->item); } @@ -821,8 +810,8 @@ static void put_comm(struct dlm_comm *cm) int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out, int **new_out, int *new_count_out) { - struct dlm_space *sp; - struct dlm_node *nd; + struct space *sp; + struct node *nd; int i = 0, rv = 0, ids_count = 0, new_count = 0; int *ids, *new; @@ -885,8 +874,8 @@ int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out, int dlm_node_weight(char *lsname, int nodeid) { - struct dlm_space *sp; - struct dlm_node *nd; + struct space *sp; + struct node *nd; int w = -EEXIST; sp = get_space(lsname); @@ -908,7 +897,7 @@ int dlm_node_weight(char *lsname, int nodeid) int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr) { - struct dlm_comm *cm = get_comm(nodeid, NULL); + struct comm *cm = get_comm(nodeid, NULL); if (!cm) return -EEXIST; if (!cm->addr_count) @@ -920,7 +909,7 @@ int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr) int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid) { - struct dlm_comm *cm = get_comm(0, addr); + struct comm *cm = get_comm(0, addr); if (!cm) return -EEXIST; *nodeid = cm->nodeid; diff --git a/trunk/fs/dlm/user.c b/trunk/fs/dlm/user.c index 34f14a14fb4e..929e48ae7591 100644 --- a/trunk/fs/dlm/user.c +++ b/trunk/fs/dlm/user.c @@ -527,10 +527,8 @@ static ssize_t device_write(struct file *file, const char __user *buf, k32buf = (struct dlm_write_request32 *)kbuf; kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) - sizeof(struct dlm_write_request32)), GFP_KERNEL); - if (!kbuf) { - kfree(k32buf); + if (!kbuf) return -ENOMEM; - } if (proc) set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); @@ -541,10 +539,8 @@ static ssize_t device_write(struct file *file, const char __user *buf, /* 
do we really need this? can a write happen after a close? */ if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) && - (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) { - error = -EINVAL; - goto out_free; - } + (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) + return -EINVAL; sigfillset(&allsigs); sigprocmask(SIG_BLOCK, &allsigs, &tmpsig); diff --git a/trunk/fs/xfs/linux-2.6/sema.h b/trunk/fs/xfs/linux-2.6/sema.h new file mode 100644 index 000000000000..3abe7e9ceb33 --- /dev/null +++ b/trunk/fs/xfs/linux-2.6/sema.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_SUPPORT_SEMA_H__ +#define __XFS_SUPPORT_SEMA_H__ + +#include +#include +#include +#include + +/* + * sema_t structure just maps to struct semaphore in Linux kernel. + */ + +typedef struct semaphore sema_t; + +#define initnsema(sp, val, name) sema_init(sp, val) +#define psema(sp, b) down(sp) +#define vsema(sp) up(sp) +#define freesema(sema) do { } while (0) + +static inline int issemalocked(sema_t *sp) +{ + return down_trylock(sp) || (up(sp), 0); +} + +/* + * Map cpsema (try to get the sema) to down_trylock. We need to switch + * the return values since cpsema returns 1 (acquired) 0 (failed) and + * down_trylock returns the reverse 0 (acquired) 1 (failed). + */ +static inline int cpsema(sema_t *sp) +{ + return down_trylock(sp) ? 
0 : 1; +} + +#endif /* __XFS_SUPPORT_SEMA_H__ */ diff --git a/trunk/fs/xfs/linux-2.6/xfs_aops.c b/trunk/fs/xfs/linux-2.6/xfs_aops.c index f42f80a3b1fa..fa47e43b8b41 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_aops.c +++ b/trunk/fs/xfs/linux-2.6/xfs_aops.c @@ -73,6 +73,7 @@ xfs_page_trace( unsigned long pgoff) { xfs_inode_t *ip; + bhv_vnode_t *vp = vn_from_inode(inode); loff_t isize = i_size_read(inode); loff_t offset = page_offset(page); int delalloc = -1, unmapped = -1, unwritten = -1; @@ -80,7 +81,7 @@ xfs_page_trace( if (page_has_buffers(page)) xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - ip = XFS_I(inode); + ip = xfs_vtoi(vp); if (!ip->i_rwtrace) return; diff --git a/trunk/fs/xfs/linux-2.6/xfs_buf.c b/trunk/fs/xfs/linux-2.6/xfs_buf.c index 986061ae1b9b..9cc8f0213095 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_buf.c +++ b/trunk/fs/xfs/linux-2.6/xfs_buf.c @@ -58,7 +58,7 @@ xfs_buf_trace( bp, id, (void *)(unsigned long)bp->b_flags, (void *)(unsigned long)bp->b_hold.counter, - (void *)(unsigned long)bp->b_sema.count, + (void *)(unsigned long)bp->b_sema.count.counter, (void *)current, data, ra, (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff), @@ -253,7 +253,7 @@ _xfs_buf_initialize( memset(bp, 0, sizeof(xfs_buf_t)); atomic_set(&bp->b_hold, 1); - init_completion(&bp->b_iowait); + init_MUTEX_LOCKED(&bp->b_iodonesema); INIT_LIST_HEAD(&bp->b_list); INIT_LIST_HEAD(&bp->b_hash_list); init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ @@ -838,7 +838,6 @@ xfs_buf_rele( return; } - ASSERT(atomic_read(&bp->b_hold) > 0); if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { if (bp->b_relse) { atomic_inc(&bp->b_hold); @@ -852,6 +851,11 @@ xfs_buf_rele( spin_unlock(&hash->bh_lock); xfs_buf_free(bp); } + } else { + /* + * Catch reference count leaks + */ + ASSERT(atomic_read(&bp->b_hold) >= 0); } } @@ -1033,7 +1037,7 @@ xfs_buf_ioend( xfs_buf_iodone_work(&bp->b_iodone_work); } } else { - complete(&bp->b_iowait); + up(&bp->b_iodonesema); } } @@ -1271,7 +1275,7 @@ xfs_buf_iowait( XB_TRACE(bp, "iowait", 0); if (atomic_read(&bp->b_io_remaining)) blk_run_address_space(bp->b_target->bt_mapping); - wait_for_completion(&bp->b_iowait); + down(&bp->b_iodonesema); XB_TRACE(bp, "iowaited", (long)bp->b_error); return bp->b_error; } @@ -1795,7 +1799,7 @@ int __init xfs_buf_init(void) { #ifdef XFS_BUF_TRACE - xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS); + xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); #endif xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", diff --git a/trunk/fs/xfs/linux-2.6/xfs_buf.h b/trunk/fs/xfs/linux-2.6/xfs_buf.h index fe0109956656..29d1d4adc078 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_buf.h +++ b/trunk/fs/xfs/linux-2.6/xfs_buf.h @@ -157,7 +157,7 @@ typedef struct xfs_buf { xfs_buf_iodone_t b_iodone; /* I/O completion function */ xfs_buf_relse_t b_relse; /* releasing function */ xfs_buf_bdstrat_t b_strat; /* pre-write function */ - struct completion b_iowait; /* queue for I/O waiters */ + struct semaphore b_iodonesema; /* Semaphore for I/O waiters */ void *b_fspriv; void *b_fspriv2; void *b_fspriv3; @@ -352,7 +352,7 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); #define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) #define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp) #define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp) -#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); +#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema); #define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) #define 
XFS_BUF_TARGET(bp) ((bp)->b_target) diff --git a/trunk/fs/xfs/linux-2.6/xfs_export.c b/trunk/fs/xfs/linux-2.6/xfs_export.c index 24fd598af846..987fe84f7b13 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_export.c +++ b/trunk/fs/xfs/linux-2.6/xfs_export.c @@ -139,7 +139,7 @@ xfs_nfs_get_inode( } xfs_iunlock(ip, XFS_ILOCK_SHARED); - return VFS_I(ip); + return ip->i_vnode; } STATIC struct dentry * @@ -167,7 +167,7 @@ xfs_fs_fh_to_dentry(struct super_block *sb, struct fid *fid, if (!inode) return NULL; if (IS_ERR(inode)) - return ERR_CAST(inode); + return ERR_PTR(PTR_ERR(inode)); result = d_alloc_anon(inode); if (!result) { iput(inode); @@ -198,7 +198,7 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid, if (!inode) return NULL; if (IS_ERR(inode)) - return ERR_CAST(inode); + return ERR_PTR(PTR_ERR(inode)); result = d_alloc_anon(inode); if (!result) { iput(inode); @@ -219,9 +219,9 @@ xfs_fs_get_parent( if (unlikely(error)) return ERR_PTR(-error); - parent = d_alloc_anon(VFS_I(cip)); + parent = d_alloc_anon(cip->i_vnode); if (unlikely(!parent)) { - iput(VFS_I(cip)); + iput(cip->i_vnode); return ERR_PTR(-ENOMEM); } return parent; diff --git a/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c b/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c index 36caa6d957df..1eefe61f0e10 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -31,7 +31,7 @@ xfs_tosspages( xfs_off_t last, int fiopt) { - struct address_space *mapping = VFS_I(ip)->i_mapping; + struct address_space *mapping = ip->i_vnode->i_mapping; if (mapping->nrpages) truncate_inode_pages(mapping, first); @@ -44,7 +44,7 @@ xfs_flushinval_pages( xfs_off_t last, int fiopt) { - struct address_space *mapping = VFS_I(ip)->i_mapping; + struct address_space *mapping = ip->i_vnode->i_mapping; int ret = 0; if (mapping->nrpages) { @@ -64,7 +64,7 @@ xfs_flush_pages( uint64_t flags, int fiopt) { - struct address_space *mapping = VFS_I(ip)->i_mapping; + struct address_space *mapping = ip->i_vnode->i_mapping; int ret = 0; int ret2; diff --git a/trunk/fs/xfs/linux-2.6/xfs_ioctl.c b/trunk/fs/xfs/linux-2.6/xfs_ioctl.c index 48799ba7e3e6..acb978d9d085 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/trunk/fs/xfs/linux-2.6/xfs_ioctl.c @@ -245,7 +245,7 @@ xfs_vget_fsop_handlereq( xfs_iunlock(ip, XFS_ILOCK_SHARED); - *inode = VFS_I(ip); + *inode = XFS_ITOV(ip); return 0; } @@ -927,7 +927,7 @@ STATIC void xfs_diflags_to_linux( struct xfs_inode *ip) { - struct inode *inode = VFS_I(ip); + struct inode *inode = XFS_ITOV(ip); unsigned int xflags = xfs_ip2xflags(ip); if (xflags & XFS_XFLAG_IMMUTABLE) diff --git a/trunk/fs/xfs/linux-2.6/xfs_iops.c b/trunk/fs/xfs/linux-2.6/xfs_iops.c index 91bcd979242c..e88f51028086 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_iops.c +++ b/trunk/fs/xfs/linux-2.6/xfs_iops.c @@ -62,7 +62,7 @@ void xfs_synchronize_atime( xfs_inode_t *ip) { - struct inode *inode = VFS_I(ip); + struct inode *inode = ip->i_vnode; if (inode) { ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; @@ -79,7 +79,7 @@ void xfs_mark_inode_dirty_sync( xfs_inode_t *ip) { - struct inode *inode = VFS_I(ip); + struct inode *inode = ip->i_vnode; if (inode) mark_inode_dirty_sync(inode); @@ -89,31 +89,36 @@ xfs_mark_inode_dirty_sync( * Change the requested timestamp in the given inode. * We don't lock across timestamp updates, and we don't log them but * we do record the fact that there is dirty information in core. 
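The xfs_buf.c hunks above revert the I/O-done wait from a struct completion back to a semaphore (b_iodonesema) that is created locked with init_MUTEX_LOCKED(), released by up() in xfs_buf_ioend() and waited on with down() in xfs_buf_iowait(). As a rough userspace analogue of that "semaphore used as a completion" idiom, a POSIX semaphore initialised to 0 behaves the same way. The sketch below (build with -pthread) uses made-up names and is only illustrative; it is not the kernel code.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t iodone;                    /* created "locked": count 0 */

static void *io_worker(void *arg)
{
        (void)arg;
        sleep(1);                       /* pretend the I/O takes a while */
        printf("worker: I/O done\n");
        sem_post(&iodone);              /* analogue of up(&bp->b_iodonesema) */
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&iodone, 0, 0);        /* analogue of init_MUTEX_LOCKED() */
        pthread_create(&t, NULL, io_worker, NULL);
        sem_wait(&iodone);              /* analogue of down(&bp->b_iodonesema) */
        printf("waiter: saw completion\n");
        pthread_join(t, NULL);
        sem_destroy(&iodone);
        return 0;
}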
+ * + * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG + * with XFS_ICHGTIME_ACC to be sure that access time + * update will take. Calling first with XFS_ICHGTIME_ACC + * and then XFS_ICHGTIME_MOD may fail to modify the access + * timestamp if the filesystem is mounted noacctm. */ void xfs_ichgtime( xfs_inode_t *ip, int flags) { - struct inode *inode = VFS_I(ip); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); timespec_t tv; - int sync_it = 0; - - tv = current_fs_time(inode->i_sb); - if ((flags & XFS_ICHGTIME_MOD) && - !timespec_equal(&inode->i_mtime, &tv)) { + nanotime(&tv); + if (flags & XFS_ICHGTIME_MOD) { inode->i_mtime = tv; ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; - sync_it = 1; } - if ((flags & XFS_ICHGTIME_CHG) && - !timespec_equal(&inode->i_ctime, &tv)) { + if (flags & XFS_ICHGTIME_ACC) { + inode->i_atime = tv; + ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec; + ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec; + } + if (flags & XFS_ICHGTIME_CHG) { inode->i_ctime = tv; ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec; ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec; - sync_it = 1; } /* @@ -125,11 +130,55 @@ xfs_ichgtime( * ensure that the compiler does not reorder the update * of i_update_core above the timestamp updates above. */ - if (sync_it) { - SYNCHRONIZE(); - ip->i_update_core = 1; + SYNCHRONIZE(); + ip->i_update_core = 1; + if (!(inode->i_state & I_NEW)) mark_inode_dirty_sync(inode); +} + +/* + * Variant on the above which avoids querying the system clock + * in situations where we know the Linux inode timestamps have + * just been updated (and so we can update our inode cheaply). + */ +void +xfs_ichgtime_fast( + xfs_inode_t *ip, + struct inode *inode, + int flags) +{ + timespec_t *tvp; + + /* + * Atime updates for read() & friends are handled lazily now, and + * explicit updates must go through xfs_ichgtime() + */ + ASSERT((flags & XFS_ICHGTIME_ACC) == 0); + + if (flags & XFS_ICHGTIME_MOD) { + tvp = &inode->i_mtime; + ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec; + ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec; } + if (flags & XFS_ICHGTIME_CHG) { + tvp = &inode->i_ctime; + ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec; + ip->i_d.di_ctime.t_nsec = (__int32_t)tvp->tv_nsec; + } + + /* + * We update the i_update_core field _after_ changing + * the timestamps in order to coordinate properly with + * xfs_iflush() so that we don't lose timestamp updates. + * This keeps us from having to hold the inode lock + * while doing this. We use the SYNCHRONIZE macro to + * ensure that the compiler does not reorder the update + * of i_update_core above the timestamp updates above. + */ + SYNCHRONIZE(); + ip->i_update_core = 1; + if (!(inode->i_state & I_NEW)) + mark_inode_dirty_sync(inode); } /* @@ -250,7 +299,7 @@ xfs_vn_mknod( if (unlikely(error)) goto out_free_acl; - inode = VFS_I(ip); + inode = ip->i_vnode; error = xfs_init_security(inode, dir); if (unlikely(error)) @@ -317,7 +366,7 @@ xfs_vn_lookup( return NULL; } - return d_splice_alias(VFS_I(cip), dentry); + return d_splice_alias(cip->i_vnode, dentry); } STATIC struct dentry * @@ -350,12 +399,12 @@ xfs_vn_ci_lookup( /* if exact match, just splice and exit */ if (!ci_name.name) - return d_splice_alias(VFS_I(ip), dentry); + return d_splice_alias(ip->i_vnode, dentry); /* else case-insensitive match... 
*/ dname.name = ci_name.name; dname.len = ci_name.len; - dentry = d_add_ci(VFS_I(ip), dentry, &dname); + dentry = d_add_ci(ip->i_vnode, dentry, &dname); kmem_free(ci_name.name); return dentry; } @@ -429,7 +478,7 @@ xfs_vn_symlink( if (unlikely(error)) goto out; - inode = VFS_I(cip); + inode = cip->i_vnode; error = xfs_init_security(inode, dir); if (unlikely(error)) @@ -661,7 +710,7 @@ xfs_vn_fallocate( return error; } -static const struct inode_operations xfs_inode_operations = { +const struct inode_operations xfs_inode_operations = { .permission = xfs_vn_permission, .truncate = xfs_vn_truncate, .getattr = xfs_vn_getattr, @@ -673,7 +722,7 @@ static const struct inode_operations xfs_inode_operations = { .fallocate = xfs_vn_fallocate, }; -static const struct inode_operations xfs_dir_inode_operations = { +const struct inode_operations xfs_dir_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_lookup, .link = xfs_vn_link, @@ -698,7 +747,7 @@ static const struct inode_operations xfs_dir_inode_operations = { .listxattr = xfs_vn_listxattr, }; -static const struct inode_operations xfs_dir_ci_inode_operations = { +const struct inode_operations xfs_dir_ci_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_ci_lookup, .link = xfs_vn_link, @@ -723,7 +772,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = { .listxattr = xfs_vn_listxattr, }; -static const struct inode_operations xfs_symlink_inode_operations = { +const struct inode_operations xfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = xfs_vn_follow_link, .put_link = xfs_vn_put_link, @@ -735,98 +784,3 @@ static const struct inode_operations xfs_symlink_inode_operations = { .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; - -STATIC void -xfs_diflags_to_iflags( - struct inode *inode, - struct xfs_inode *ip) -{ - if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - else - inode->i_flags &= ~S_IMMUTABLE; - if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) - inode->i_flags |= S_APPEND; - else - inode->i_flags &= ~S_APPEND; - if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) - inode->i_flags |= S_SYNC; - else - inode->i_flags &= ~S_SYNC; - if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) - inode->i_flags |= S_NOATIME; - else - inode->i_flags &= ~S_NOATIME; -} - -/* - * Initialize the Linux inode, set up the operation vectors and - * unlock the inode. - * - * When reading existing inodes from disk this is called directly - * from xfs_iget, when creating a new inode it is called from - * xfs_ialloc after setting up the inode. 
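The xfs_setup_inode() code removed below (and the xfs_set_inodeops() re-added in the xfs_super.c hunks further down) both pick the inode operation vectors by switching on inode->i_mode & S_IFMT. A minimal userspace sketch of that same file-type dispatch, with an invented helper name and labels, assuming only lstat() and the standard S_IF* constants:

#include <stdio.h>
#include <sys/stat.h>

/* Same style of dispatch as switch (inode->i_mode & S_IFMT) in the hunks
 * around this point; the helper name and the strings are made up. */
static const char *ops_for_mode(mode_t mode)
{
        switch (mode & S_IFMT) {
        case S_IFREG:   return "regular file ops";
        case S_IFDIR:   return "directory ops";
        case S_IFLNK:   return "symlink ops";
        default:        return "special inode ops";
        }
}

int main(int argc, char **argv)
{
        struct stat st;

        if (argc > 1 && lstat(argv[1], &st) == 0)
                printf("%s -> %s\n", argv[1], ops_for_mode(st.st_mode));
        return 0;
}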
- */ -void -xfs_setup_inode( - struct xfs_inode *ip) -{ - struct inode *inode = ip->i_vnode; - - inode->i_mode = ip->i_d.di_mode; - inode->i_nlink = ip->i_d.di_nlink; - inode->i_uid = ip->i_d.di_uid; - inode->i_gid = ip->i_d.di_gid; - - switch (inode->i_mode & S_IFMT) { - case S_IFBLK: - case S_IFCHR: - inode->i_rdev = - MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, - sysv_minor(ip->i_df.if_u2.if_rdev)); - break; - default: - inode->i_rdev = 0; - break; - } - - inode->i_generation = ip->i_d.di_gen; - i_size_write(inode, ip->i_d.di_size); - inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; - inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; - inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; - inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; - inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; - inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; - xfs_diflags_to_iflags(inode, ip); - xfs_iflags_clear(ip, XFS_IMODIFIED); - - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - inode->i_op = &xfs_inode_operations; - inode->i_fop = &xfs_file_operations; - inode->i_mapping->a_ops = &xfs_address_space_operations; - break; - case S_IFDIR: - if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) - inode->i_op = &xfs_dir_ci_inode_operations; - else - inode->i_op = &xfs_dir_inode_operations; - inode->i_fop = &xfs_dir_file_operations; - break; - case S_IFLNK: - inode->i_op = &xfs_symlink_inode_operations; - if (!(ip->i_df.if_flags & XFS_IFINLINE)) - inode->i_mapping->a_ops = &xfs_address_space_operations; - break; - default: - inode->i_op = &xfs_inode_operations; - init_special_inode(inode, inode->i_mode, inode->i_rdev); - break; - } - - xfs_iflags_clear(ip, XFS_INEW); - barrier(); - - unlock_new_inode(inode); -} diff --git a/trunk/fs/xfs/linux-2.6/xfs_iops.h b/trunk/fs/xfs/linux-2.6/xfs_iops.h index 8b1a1e31dc21..d97ba934a2ac 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_iops.h +++ b/trunk/fs/xfs/linux-2.6/xfs_iops.h @@ -18,7 +18,10 @@ #ifndef __XFS_IOPS_H__ #define __XFS_IOPS_H__ -struct xfs_inode; +extern const struct inode_operations xfs_inode_operations; +extern const struct inode_operations xfs_dir_inode_operations; +extern const struct inode_operations xfs_dir_ci_inode_operations; +extern const struct inode_operations xfs_symlink_inode_operations; extern const struct file_operations xfs_file_operations; extern const struct file_operations xfs_dir_file_operations; @@ -26,6 +29,14 @@ extern const struct file_operations xfs_invis_file_operations; extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); -extern void xfs_setup_inode(struct xfs_inode *); +struct xfs_inode; +extern void xfs_ichgtime(struct xfs_inode *, int); +extern void xfs_ichgtime_fast(struct xfs_inode *, struct inode *, int); + +#define xfs_vtoi(vp) \ + ((struct xfs_inode *)vn_to_inode(vp)->i_private) + +#define XFS_I(inode) \ + ((struct xfs_inode *)(inode)->i_private) #endif /* __XFS_IOPS_H__ */ diff --git a/trunk/fs/xfs/linux-2.6/xfs_linux.h b/trunk/fs/xfs/linux-2.6/xfs_linux.h index cc0f7b3a9795..4d45d9351a6c 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_linux.h +++ b/trunk/fs/xfs/linux-2.6/xfs_linux.h @@ -45,13 +45,13 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -126,6 +126,8 @@ #define current_cpu() (raw_smp_processor_id()) #define current_pid() (current->pid) +#define current_fsuid(cred) (current->fsuid) +#define current_fsgid(cred) (current->fsgid) #define current_test_flags(f) (current->flags & (f)) #define current_set_flags_nested(sp, f) \ (*(sp) 
= current->flags, current->flags |= (f)) @@ -178,7 +180,7 @@ #define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) #define xfs_stack_trace() dump_stack() #define xfs_itruncate_data(ip, off) \ - (-vmtruncate(VFS_I(ip), (off))) + (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) /* Move the kernel do_div definition off to one side */ diff --git a/trunk/fs/xfs/linux-2.6/xfs_lrw.c b/trunk/fs/xfs/linux-2.6/xfs_lrw.c index 1957e5357d04..82333b3e118e 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_lrw.c +++ b/trunk/fs/xfs/linux-2.6/xfs_lrw.c @@ -137,7 +137,7 @@ xfs_iozero( struct address_space *mapping; int status; - mapping = VFS_I(ip)->i_mapping; + mapping = ip->i_vnode->i_mapping; do { unsigned offset, bytes; void *fsdata; @@ -674,7 +674,9 @@ xfs_write( */ if (likely(!(ioflags & IO_INVIS) && !mnt_want_write(file->f_path.mnt))) { - xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + file_update_time(file); + xfs_ichgtime_fast(xip, inode, + XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); mnt_drop_write(file->f_path.mnt); } diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.c b/trunk/fs/xfs/linux-2.6/xfs_super.c index 73c65f19e549..30ae96397e31 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_super.c +++ b/trunk/fs/xfs/linux-2.6/xfs_super.c @@ -581,6 +581,118 @@ xfs_max_file_offset( return (((__uint64_t)pagefactor) << bitshift) - 1; } +STATIC_INLINE void +xfs_set_inodeops( + struct inode *inode) +{ + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + inode->i_op = &xfs_inode_operations; + inode->i_fop = &xfs_file_operations; + inode->i_mapping->a_ops = &xfs_address_space_operations; + break; + case S_IFDIR: + if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) + inode->i_op = &xfs_dir_ci_inode_operations; + else + inode->i_op = &xfs_dir_inode_operations; + inode->i_fop = &xfs_dir_file_operations; + break; + case S_IFLNK: + inode->i_op = &xfs_symlink_inode_operations; + if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE)) + inode->i_mapping->a_ops = &xfs_address_space_operations; + break; + default: + inode->i_op = &xfs_inode_operations; + init_special_inode(inode, inode->i_mode, inode->i_rdev); + break; + } +} + +STATIC_INLINE void +xfs_revalidate_inode( + xfs_mount_t *mp, + bhv_vnode_t *vp, + xfs_inode_t *ip) +{ + struct inode *inode = vn_to_inode(vp); + + inode->i_mode = ip->i_d.di_mode; + inode->i_nlink = ip->i_d.di_nlink; + inode->i_uid = ip->i_d.di_uid; + inode->i_gid = ip->i_d.di_gid; + + switch (inode->i_mode & S_IFMT) { + case S_IFBLK: + case S_IFCHR: + inode->i_rdev = + MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, + sysv_minor(ip->i_df.if_u2.if_rdev)); + break; + default: + inode->i_rdev = 0; + break; + } + + inode->i_generation = ip->i_d.di_gen; + i_size_write(inode, ip->i_d.di_size); + inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; + inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; + inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; + inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; + inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; + inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; + if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) + inode->i_flags |= S_IMMUTABLE; + else + inode->i_flags &= ~S_IMMUTABLE; + if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) + inode->i_flags |= S_APPEND; + else + inode->i_flags &= ~S_APPEND; + if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) + inode->i_flags |= S_SYNC; + else + inode->i_flags &= ~S_SYNC; + if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) + inode->i_flags |= S_NOATIME; + else + inode->i_flags &= ~S_NOATIME; + xfs_iflags_clear(ip, XFS_IMODIFIED); +} + +void +xfs_initialize_vnode( + struct 
xfs_mount *mp, + bhv_vnode_t *vp, + struct xfs_inode *ip) +{ + struct inode *inode = vn_to_inode(vp); + + if (!ip->i_vnode) { + ip->i_vnode = vp; + inode->i_private = ip; + } + + /* + * We need to set the ops vectors, and unlock the inode, but if + * we have been called during the new inode create process, it is + * too early to fill in the Linux inode. We will get called a + * second time once the inode is properly set up, and then we can + * finish our work. + */ + if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) { + xfs_revalidate_inode(mp, vp, ip); + xfs_set_inodeops(inode); + + xfs_iflags_clear(ip, XFS_INEW); + barrier(); + + unlock_new_inode(inode); + } +} + int xfs_blkdev_get( xfs_mount_t *mp, @@ -870,21 +982,26 @@ STATIC struct inode * xfs_fs_alloc_inode( struct super_block *sb) { - return kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); + bhv_vnode_t *vp; + + vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); + if (unlikely(!vp)) + return NULL; + return vn_to_inode(vp); } STATIC void xfs_fs_destroy_inode( struct inode *inode) { - kmem_zone_free(xfs_vnode_zone, inode); + kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode)); } STATIC void xfs_fs_inode_init_once( void *vnode) { - inode_init_once((struct inode *)vnode); + inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); } /* @@ -989,7 +1106,7 @@ void xfs_flush_inode( xfs_inode_t *ip) { - struct inode *inode = VFS_I(ip); + struct inode *inode = ip->i_vnode; igrab(inode); xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); @@ -1014,7 +1131,7 @@ void xfs_flush_device( xfs_inode_t *ip) { - struct inode *inode = VFS_I(ip); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); igrab(inode); xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); @@ -1083,15 +1200,6 @@ xfssyncd( return 0; } -STATIC void -xfs_free_fsname( - struct xfs_mount *mp) -{ - kfree(mp->m_fsname); - kfree(mp->m_rtname); - kfree(mp->m_logname); -} - STATIC void xfs_fs_put_super( struct super_block *sb) @@ -1131,6 +1239,8 @@ xfs_fs_put_super( error = xfs_unmount_flush(mp, 0); WARN_ON(error); + IRELE(rip); + /* * If we're forcing a shutdown, typically because of a media error, * we want to make sure we invalidate dirty pages that belong to @@ -1147,12 +1257,10 @@ xfs_fs_put_super( } xfs_unmountfs(mp); - xfs_freesb(mp); xfs_icsb_destroy_counters(mp); xfs_close_devices(mp); xfs_qmops_put(mp); xfs_dmops_put(mp); - xfs_free_fsname(mp); kfree(mp); } @@ -1409,8 +1517,6 @@ xfs_start_flags( struct xfs_mount_args *ap, struct xfs_mount *mp) { - int error; - /* Values are in BBs */ if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { /* @@ -1443,27 +1549,17 @@ xfs_start_flags( ap->logbufsize); return XFS_ERROR(EINVAL); } - - error = ENOMEM; - mp->m_logbsize = ap->logbufsize; mp->m_fsname_len = strlen(ap->fsname) + 1; - - mp->m_fsname = kstrdup(ap->fsname, GFP_KERNEL); - if (!mp->m_fsname) - goto out; - + mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP); + strcpy(mp->m_fsname, ap->fsname); if (ap->rtname[0]) { - mp->m_rtname = kstrdup(ap->rtname, GFP_KERNEL); - if (!mp->m_rtname) - goto out_free_fsname; - + mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP); + strcpy(mp->m_rtname, ap->rtname); } - if (ap->logname[0]) { - mp->m_logname = kstrdup(ap->logname, GFP_KERNEL); - if (!mp->m_logname) - goto out_free_rtname; + mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP); + strcpy(mp->m_logname, ap->logname); } if (ap->flags & XFSMNT_WSYNC) @@ -1536,14 +1632,6 @@ xfs_start_flags( if (ap->flags & XFSMNT_DMAPI) mp->m_flags |= XFS_MOUNT_DMAPI; 
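xfs_initialize_vnode() above defers the expensive part of the setup when it is called too early (ip->i_d.di_mode still 0) and finishes it on a later call once the inode has been read in, clearing XFS_INEW and calling unlock_new_inode(). The toy model below captures only that two-phase control flow; the structure and field names are invented and nothing here is XFS code.

#include <stdbool.h>
#include <stdio.h>

struct toy_inode { unsigned int mode; bool is_new; bool ops_set; };

/* First call may arrive before the on-disk fields are known (mode == 0),
 * so the real work is skipped; a second call completes the setup. */
static void toy_initialize(struct toy_inode *ino)
{
        if (ino->mode != 0 && ino->is_new) {
                ino->ops_set = true;    /* fill in ops vectors, timestamps, ... */
                ino->is_new  = false;   /* analogue of unlock_new_inode() */
        }
}

int main(void)
{
        struct toy_inode ino = { .mode = 0, .is_new = true, .ops_set = false };

        toy_initialize(&ino);           /* too early: mode not yet known */
        printf("after first call:  ops_set=%d\n", ino.ops_set);

        ino.mode = 0100644;             /* inode now fully read from disk */
        toy_initialize(&ino);           /* second call finishes the setup */
        printf("after second call: ops_set=%d\n", ino.ops_set);
        return 0;
}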
return 0; - - - out_free_rtname: - kfree(mp->m_rtname); - out_free_fsname: - kfree(mp->m_fsname); - out: - return error; } /* @@ -1704,10 +1792,10 @@ xfs_fs_fill_super( */ error = xfs_start_flags(args, mp); if (error) - goto out_free_fsname; + goto out_destroy_counters; error = xfs_readsb(mp, flags); if (error) - goto out_free_fsname; + goto out_destroy_counters; error = xfs_finish_flags(args, mp); if (error) goto out_free_sb; @@ -1723,7 +1811,7 @@ xfs_fs_fill_super( if (error) goto out_free_sb; - error = xfs_mountfs(mp); + error = xfs_mountfs(mp, flags); if (error) goto out_filestream_unmount; @@ -1737,7 +1825,7 @@ xfs_fs_fill_super( sb->s_time_gran = 1; set_posix_acl_flag(sb); - root = igrab(VFS_I(mp->m_rootip)); + root = igrab(mp->m_rootip->i_vnode); if (!root) { error = ENOENT; goto fail_unmount; @@ -1769,8 +1857,7 @@ xfs_fs_fill_super( xfs_filestream_unmount(mp); out_free_sb: xfs_freesb(mp); - out_free_fsname: - xfs_free_fsname(mp); + out_destroy_counters: xfs_icsb_destroy_counters(mp); xfs_close_devices(mp); out_put_qmops: @@ -1803,8 +1890,10 @@ xfs_fs_fill_super( error = xfs_unmount_flush(mp, 0); WARN_ON(error); + IRELE(mp->m_rootip); + xfs_unmountfs(mp); - goto out_free_sb; + goto out_destroy_counters; } STATIC int @@ -1925,7 +2014,7 @@ xfs_free_trace_bufs(void) STATIC int __init xfs_init_zones(void) { - xfs_vnode_zone = kmem_zone_init_flags(sizeof(struct inode), "xfs_vnode", + xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD, xfs_fs_inode_init_once); diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.h b/trunk/fs/xfs/linux-2.6/xfs_super.h index fe2ef4e6a0f9..b7d13da01bd6 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_super.h +++ b/trunk/fs/xfs/linux-2.6/xfs_super.h @@ -101,6 +101,9 @@ struct block_device; extern __uint64_t xfs_max_file_offset(unsigned int); +extern void xfs_initialize_vnode(struct xfs_mount *mp, bhv_vnode_t *vp, + struct xfs_inode *ip); + extern void xfs_flush_inode(struct xfs_inode *); extern void xfs_flush_device(struct xfs_inode *); diff --git a/trunk/fs/xfs/linux-2.6/xfs_vnode.c b/trunk/fs/xfs/linux-2.6/xfs_vnode.c index b52528bbbfff..25488b6d9881 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_vnode.c +++ b/trunk/fs/xfs/linux-2.6/xfs_vnode.c @@ -33,7 +33,7 @@ /* - * Dedicated vnode inactive/reclaim sync wait queues. + * Dedicated vnode inactive/reclaim sync semaphores. * Prime number of hash buckets since address is used as the key. */ #define NVSYNC 37 @@ -82,6 +82,24 @@ vn_ioerror( xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); } + +/* + * Add a reference to a referenced vnode. + */ +bhv_vnode_t * +vn_hold( + bhv_vnode_t *vp) +{ + struct inode *inode; + + XFS_STATS_INC(vn_hold); + + inode = igrab(vn_to_inode(vp)); + ASSERT(inode); + + return vp; +} + #ifdef XFS_INODE_TRACE /* @@ -90,7 +108,7 @@ vn_ioerror( */ static inline int xfs_icount(struct xfs_inode *ip) { - struct inode *vp = VFS_I(ip); + bhv_vnode_t *vp = XFS_ITOV_NULL(ip); if (vp) return vn_count(vp); diff --git a/trunk/fs/xfs/linux-2.6/xfs_vnode.h b/trunk/fs/xfs/linux-2.6/xfs_vnode.h index 683ce16210ff..41ca2cec5d31 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_vnode.h +++ b/trunk/fs/xfs/linux-2.6/xfs_vnode.h @@ -22,6 +22,20 @@ struct file; struct xfs_iomap; struct attrlist_cursor_kern; +typedef struct inode bhv_vnode_t; + +/* + * Vnode to Linux inode mapping. 
+ */ +static inline bhv_vnode_t *vn_from_inode(struct inode *inode) +{ + return inode; +} +static inline struct inode *vn_to_inode(bhv_vnode_t *vnode) +{ + return vnode; +} + /* * Return values for xfs_inactive. A return value of * VN_INACTIVE_NOCACHE implies that the file system behavior @@ -62,52 +76,57 @@ extern void vn_iowait(struct xfs_inode *ip); extern void vn_iowake(struct xfs_inode *ip); extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l); -static inline int vn_count(struct inode *vp) +static inline int vn_count(bhv_vnode_t *vp) { - return atomic_read(&vp->i_count); + return atomic_read(&vn_to_inode(vp)->i_count); } -#define IHOLD(ip) \ -do { \ - ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ - atomic_inc(&(VFS_I(ip)->i_count)); \ - xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ -} while (0) +/* + * Vnode reference counting functions (and macros for compatibility). + */ +extern bhv_vnode_t *vn_hold(bhv_vnode_t *); -#define IRELE(ip) \ -do { \ - xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ - iput(VFS_I(ip)); \ -} while (0) +#if defined(XFS_INODE_TRACE) +#define VN_HOLD(vp) \ + ((void)vn_hold(vp), \ + xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address)) +#define VN_RELE(vp) \ + (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \ + iput(vn_to_inode(vp))) +#else +#define VN_HOLD(vp) ((void)vn_hold(vp)) +#define VN_RELE(vp) (iput(vn_to_inode(vp))) +#endif -static inline struct inode *vn_grab(struct inode *vp) +static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp) { - return igrab(vp); + struct inode *inode = igrab(vn_to_inode(vp)); + return inode ? vn_from_inode(inode) : NULL; } /* * Dealing with bad inodes */ -static inline int VN_BAD(struct inode *vp) +static inline int VN_BAD(bhv_vnode_t *vp) { - return is_bad_inode(vp); + return is_bad_inode(vn_to_inode(vp)); } /* * Extracting atime values in various formats */ -static inline void vn_atime_to_bstime(struct inode *vp, xfs_bstime_t *bs_atime) +static inline void vn_atime_to_bstime(bhv_vnode_t *vp, xfs_bstime_t *bs_atime) { bs_atime->tv_sec = vp->i_atime.tv_sec; bs_atime->tv_nsec = vp->i_atime.tv_nsec; } -static inline void vn_atime_to_timespec(struct inode *vp, struct timespec *ts) +static inline void vn_atime_to_timespec(bhv_vnode_t *vp, struct timespec *ts) { *ts = vp->i_atime; } -static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt) +static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt) { *tt = vp->i_atime.tv_sec; } @@ -115,9 +134,9 @@ static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt) /* * Some useful predicates. 
*/ -#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) -#define VN_CACHED(vp) (vp->i_mapping->nrpages) -#define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \ +#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping) +#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages) +#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ PAGECACHE_TAG_DIRTY) diff --git a/trunk/fs/xfs/quota/xfs_dquot.c b/trunk/fs/xfs/quota/xfs_dquot.c index f2705f2fd43c..fc9f3fb39b7b 100644 --- a/trunk/fs/xfs/quota/xfs_dquot.c +++ b/trunk/fs/xfs/quota/xfs_dquot.c @@ -101,18 +101,11 @@ xfs_qm_dqinit( if (brandnewdquot) { dqp->dq_flnext = dqp->dq_flprev = dqp; mutex_init(&dqp->q_qlock); + initnsema(&dqp->q_flock, 1, "fdq"); sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq"); - /* - * Because we want to use a counting completion, complete - * the flush completion once to allow a single access to - * the flush completion without blocking. - */ - init_completion(&dqp->q_flush); - complete(&dqp->q_flush); - #ifdef XFS_DQUOT_TRACE - dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS); + dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP); xfs_dqtrace_entry(dqp, "DQINIT"); #endif } else { @@ -157,6 +150,7 @@ xfs_qm_dqdestroy( ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp)); mutex_destroy(&dqp->q_qlock); + freesema(&dqp->q_flock); sv_destroy(&dqp->q_pinwait); #ifdef XFS_DQUOT_TRACE @@ -437,7 +431,7 @@ xfs_qm_dqalloc( * when it unlocks the inode. Since we want to keep the quota * inode around, we bump the vnode ref count now. */ - IHOLD(quotip); + VN_HOLD(XFS_ITOV(quotip)); xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); nmaps = 1; @@ -1217,7 +1211,7 @@ xfs_qm_dqflush( int error; ASSERT(XFS_DQ_IS_LOCKED(dqp)); - ASSERT(!completion_done(&dqp->q_flush)); + ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); xfs_dqtrace_entry(dqp, "DQFLUSH"); /* @@ -1354,18 +1348,34 @@ xfs_qm_dqflush_done( xfs_dqfunlock(dqp); } + +int +xfs_qm_dqflock_nowait( + xfs_dquot_t *dqp) +{ + int locked; + + locked = cpsema(&((dqp)->q_flock)); + + /* XXX ifdef these out */ + if (locked) + (dqp)->dq_flags |= XFS_DQ_FLOCKED; + return (locked); +} + + int xfs_qm_dqlock_nowait( xfs_dquot_t *dqp) { - return mutex_trylock(&dqp->q_qlock); + return (mutex_trylock(&((dqp)->q_qlock))); } void xfs_dqlock( xfs_dquot_t *dqp) { - mutex_lock(&dqp->q_qlock); + mutex_lock(&(dqp->q_qlock)); } void @@ -1458,7 +1468,7 @@ xfs_qm_dqpurge( * if we're turning off quotas. Basically, we need this flush * lock, and are willing to block on it. */ - if (!xfs_dqflock_nowait(dqp)) { + if (! xfs_qm_dqflock_nowait(dqp)) { /* * Block on the flush lock after nudging dquot buffer, * if it is incore. diff --git a/trunk/fs/xfs/quota/xfs_dquot.h b/trunk/fs/xfs/quota/xfs_dquot.h index 8958d0faf8d3..f7393bba4e95 100644 --- a/trunk/fs/xfs/quota/xfs_dquot.h +++ b/trunk/fs/xfs/quota/xfs_dquot.h @@ -82,7 +82,7 @@ typedef struct xfs_dquot { xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */ xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ mutex_t q_qlock; /* quota lock */ - struct completion q_flush; /* flush completion queue */ + sema_t q_flock; /* flush lock */ uint q_pincount; /* pin count for this dquot */ sv_t q_pinwait; /* sync var for pinning */ #ifdef XFS_DQUOT_TRACE @@ -113,25 +113,17 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) /* - * Manage the q_flush completion queue embedded in the dquot. This completion - * queue synchronizes processes attempting to flush the in-core dquot back to - * disk. 
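The quota hunks that follow swap the dquot flush "lock" back from a completion to a semaphore (q_flock) with an initial count of one, where xfs_qm_dqflock_nowait() reports 1 on success and 0 on failure via cpsema(). The userspace sketch below mimics just that calling convention with a POSIX semaphore; struct demo_dquot and the DEMO_* names are invented for illustration.

#include <semaphore.h>
#include <stdio.h>

struct demo_dquot {
        sem_t flock;
        int   flags;
};
#define DEMO_DQ_FLOCKED 0x1

/* Returns 1 if the flush lock was taken, 0 if someone else holds it,
 * mirroring the cpsema()-based xfs_qm_dqflock_nowait() above. */
static int demo_dqflock_nowait(struct demo_dquot *dqp)
{
        if (sem_trywait(&dqp->flock) != 0)
                return 0;
        dqp->flags |= DEMO_DQ_FLOCKED;
        return 1;
}

static void demo_dqfunlock(struct demo_dquot *dqp)
{
        dqp->flags &= ~DEMO_DQ_FLOCKED;
        sem_post(&dqp->flock);
}

int main(void)
{
        struct demo_dquot dq = { .flags = 0 };

        sem_init(&dq.flock, 0, 1);      /* initial count 1: lock is free */
        printf("first try:  %d\n", demo_dqflock_nowait(&dq));  /* 1 */
        printf("second try: %d\n", demo_dqflock_nowait(&dq));  /* 0 */
        demo_dqfunlock(&dq);
        printf("third try:  %d\n", demo_dqflock_nowait(&dq));  /* 1 */
        return 0;
}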
+ * The following three routines simply manage the q_flock + * semaphore embedded in the dquot. This semaphore synchronizes + * processes attempting to flush the in-core dquot back to disk. */ -static inline void xfs_dqflock(xfs_dquot_t *dqp) -{ - wait_for_completion(&dqp->q_flush); -} - -static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp) -{ - return try_wait_for_completion(&dqp->q_flush); -} - -static inline void xfs_dqfunlock(xfs_dquot_t *dqp) -{ - complete(&dqp->q_flush); -} +#define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\ + (dqp)->dq_flags |= XFS_DQ_FLOCKED; } +#define xfs_dqfunlock(dqp) { ASSERT(issemalocked(&((dqp)->q_flock))); \ + vsema(&((dqp)->q_flock)); \ + (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } +#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock))) #define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) @@ -175,6 +167,7 @@ extern int xfs_qm_dqflush(xfs_dquot_t *, uint); extern int xfs_qm_dqpurge(xfs_dquot_t *); extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); +extern int xfs_qm_dqflock_nowait(xfs_dquot_t *); extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp); extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, xfs_disk_dquot_t *); diff --git a/trunk/fs/xfs/quota/xfs_dquot_item.c b/trunk/fs/xfs/quota/xfs_dquot_item.c index f028644caa5e..08d2fc89e6a1 100644 --- a/trunk/fs/xfs/quota/xfs_dquot_item.c +++ b/trunk/fs/xfs/quota/xfs_dquot_item.c @@ -151,7 +151,7 @@ xfs_qm_dquot_logitem_push( dqp = logitem->qli_dquot; ASSERT(XFS_DQ_IS_LOCKED(dqp)); - ASSERT(!completion_done(&dqp->q_flush)); + ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); /* * Since we were able to lock the dquot's flush lock and @@ -245,7 +245,7 @@ xfs_qm_dquot_logitem_pushbuf( * inode flush completed and the inode was taken off the AIL. * So, just get out. */ - if (completion_done(&dqp->q_flush) || + if (!issemalocked(&(dqp->q_flock)) || ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { qip->qli_pushbuf_flag = 0; xfs_dqunlock(dqp); @@ -258,7 +258,7 @@ xfs_qm_dquot_logitem_pushbuf( if (bp != NULL) { if (XFS_BUF_ISDELAYWRITE(bp)) { dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && - !completion_done(&dqp->q_flush)); + issemalocked(&(dqp->q_flock))); qip->qli_pushbuf_flag = 0; xfs_dqunlock(dqp); @@ -317,7 +317,7 @@ xfs_qm_dquot_logitem_trylock( return (XFS_ITEM_LOCKED); retval = XFS_ITEM_SUCCESS; - if (!xfs_dqflock_nowait(dqp)) { + if (! xfs_qm_dqflock_nowait(dqp)) { /* * The dquot is already being flushed. It may have been * flushed delayed write, however, and we don't want to diff --git a/trunk/fs/xfs/quota/xfs_qm.c b/trunk/fs/xfs/quota/xfs_qm.c index df0ffef9775a..021934a3d456 100644 --- a/trunk/fs/xfs/quota/xfs_qm.c +++ b/trunk/fs/xfs/quota/xfs_qm.c @@ -310,7 +310,8 @@ xfs_qm_unmount_quotadestroy( */ void xfs_qm_mount_quotas( - xfs_mount_t *mp) + xfs_mount_t *mp, + int mfsi_flags) { int error = 0; uint sbf; @@ -345,7 +346,8 @@ xfs_qm_mount_quotas( /* * If any of the quotas are not consistent, do a quotacheck. */ - if (XFS_QM_NEED_QUOTACHECK(mp)) { + if (XFS_QM_NEED_QUOTACHECK(mp) && + !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) { error = xfs_qm_quotacheck(mp); if (error) { /* Quotacheck failed and disabled quotas. */ @@ -482,7 +484,7 @@ xfs_qm_dqflush_all( xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); /* XXX a sentinel would be better */ recl = XFS_QI_MPLRECLAIMS(mp); - if (!xfs_dqflock_nowait(dqp)) { + if (! 
xfs_qm_dqflock_nowait(dqp)) { /* * If we can't grab the flush lock then check * to see if the dquot has been flushed delayed @@ -1060,7 +1062,7 @@ xfs_qm_sync( /* XXX a sentinel would be better */ recl = XFS_QI_MPLRECLAIMS(mp); - if (!xfs_dqflock_nowait(dqp)) { + if (! xfs_qm_dqflock_nowait(dqp)) { if (nowait) { xfs_dqunlock(dqp); continue; @@ -2077,7 +2079,7 @@ xfs_qm_shake_freelist( * Try to grab the flush lock. If this dquot is in the process of * getting flushed to disk, we don't want to reclaim it. */ - if (!xfs_dqflock_nowait(dqp)) { + if (! xfs_qm_dqflock_nowait(dqp)) { xfs_dqunlock(dqp); dqp = dqp->dq_flnext; continue; @@ -2255,7 +2257,7 @@ xfs_qm_dqreclaim_one(void) * Try to grab the flush lock. If this dquot is in the process of * getting flushed to disk, we don't want to reclaim it. */ - if (!xfs_dqflock_nowait(dqp)) { + if (! xfs_qm_dqflock_nowait(dqp)) { xfs_dqunlock(dqp); continue; } diff --git a/trunk/fs/xfs/quota/xfs_qm.h b/trunk/fs/xfs/quota/xfs_qm.h index 44f25349e478..cd2300e374af 100644 --- a/trunk/fs/xfs/quota/xfs_qm.h +++ b/trunk/fs/xfs/quota/xfs_qm.h @@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct { #define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); -extern void xfs_qm_mount_quotas(xfs_mount_t *); +extern void xfs_qm_mount_quotas(xfs_mount_t *, int); extern int xfs_qm_quotacheck(xfs_mount_t *); extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); extern int xfs_qm_unmount_quotas(xfs_mount_t *); diff --git a/trunk/fs/xfs/quota/xfs_qm_bhv.c b/trunk/fs/xfs/quota/xfs_qm_bhv.c index eea2e60b456b..f4f6c4c861d7 100644 --- a/trunk/fs/xfs/quota/xfs_qm_bhv.c +++ b/trunk/fs/xfs/quota/xfs_qm_bhv.c @@ -162,7 +162,7 @@ xfs_qm_newmount( * mounting, and get on with the boring life * without disk quotas. */ - xfs_qm_mount_quotas(mp); + xfs_qm_mount_quotas(mp, 0); } else { /* * Clear the quota flags, but remember them. 
This @@ -184,12 +184,13 @@ STATIC int xfs_qm_endmount( xfs_mount_t *mp, uint needquotamount, - uint quotaflags) + uint quotaflags, + int mfsi_flags) { if (needquotamount) { ASSERT(mp->m_qflags == 0); mp->m_qflags = quotaflags; - xfs_qm_mount_quotas(mp); + xfs_qm_mount_quotas(mp, mfsi_flags); } #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) diff --git a/trunk/fs/xfs/quota/xfs_qm_syscalls.c b/trunk/fs/xfs/quota/xfs_qm_syscalls.c index 1a3b803dfa55..adfb8723f65a 100644 --- a/trunk/fs/xfs/quota/xfs_qm_syscalls.c +++ b/trunk/fs/xfs/quota/xfs_qm_syscalls.c @@ -1034,7 +1034,7 @@ xfs_qm_dqrele_all_inodes( { xfs_inode_t *ip, *topino; uint ireclaims; - struct inode *vp; + bhv_vnode_t *vp; boolean_t vnode_refd; ASSERT(mp->m_quotainfo); @@ -1059,7 +1059,7 @@ xfs_qm_dqrele_all_inodes( ip = ip->i_mnext; continue; } - vp = VFS_I(ip); + vp = XFS_ITOV_NULL(ip); if (!vp) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); diff --git a/trunk/fs/xfs/xfs_acl.c b/trunk/fs/xfs/xfs_acl.c index b2f639a1416f..3e4648ad9cfc 100644 --- a/trunk/fs/xfs/xfs_acl.c +++ b/trunk/fs/xfs/xfs_acl.c @@ -37,15 +37,15 @@ #include #include -STATIC int xfs_acl_setmode(struct inode *, xfs_acl_t *, int *); +STATIC int xfs_acl_setmode(bhv_vnode_t *, xfs_acl_t *, int *); STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *); STATIC void xfs_acl_get_endian(xfs_acl_t *); STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *); STATIC int xfs_acl_invalid(xfs_acl_t *); STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *); -STATIC void xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *); -STATIC void xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *); -STATIC int xfs_acl_allow_set(struct inode *, int); +STATIC void xfs_acl_get_attr(bhv_vnode_t *, xfs_acl_t *, int, int, int *); +STATIC void xfs_acl_set_attr(bhv_vnode_t *, xfs_acl_t *, int, int *); +STATIC int xfs_acl_allow_set(bhv_vnode_t *, int); kmem_zone_t *xfs_acl_zone; @@ -55,7 +55,7 @@ kmem_zone_t *xfs_acl_zone; */ int xfs_acl_vhasacl_access( - struct inode *vp) + bhv_vnode_t *vp) { int error; @@ -68,7 +68,7 @@ xfs_acl_vhasacl_access( */ int xfs_acl_vhasacl_default( - struct inode *vp) + bhv_vnode_t *vp) { int error; @@ -207,7 +207,7 @@ posix_acl_xfs_to_xattr( int xfs_acl_vget( - struct inode *vp, + bhv_vnode_t *vp, void *acl, size_t size, int kind) @@ -217,6 +217,7 @@ xfs_acl_vget( posix_acl_xattr_header *ext_acl = acl; int flags = 0; + VN_HOLD(vp); if(size) { if (!(_ACL_ALLOC(xfs_acl))) { error = ENOMEM; @@ -238,10 +239,11 @@ xfs_acl_vget( goto out; } if (kind == _ACL_TYPE_ACCESS) - xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl); + xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl); error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size); } out: + VN_RELE(vp); if(xfs_acl) _ACL_FREE(xfs_acl); return -error; @@ -249,26 +251,28 @@ xfs_acl_vget( int xfs_acl_vremove( - struct inode *vp, + bhv_vnode_t *vp, int kind) { int error; + VN_HOLD(vp); error = xfs_acl_allow_set(vp, kind); if (!error) { - error = xfs_attr_remove(XFS_I(vp), + error = xfs_attr_remove(xfs_vtoi(vp), kind == _ACL_TYPE_DEFAULT? 
SGI_ACL_DEFAULT: SGI_ACL_FILE, ATTR_ROOT); if (error == ENOATTR) error = 0; /* 'scool */ } + VN_RELE(vp); return -error; } int xfs_acl_vset( - struct inode *vp, + bhv_vnode_t *vp, void *acl, size_t size, int kind) @@ -294,6 +298,7 @@ xfs_acl_vset( return 0; } + VN_HOLD(vp); error = xfs_acl_allow_set(vp, kind); /* Incoming ACL exists, set file mode based on its value */ @@ -316,6 +321,7 @@ xfs_acl_vset( } out: + VN_RELE(vp); _ACL_FREE(xfs_acl); return -error; } @@ -357,7 +363,7 @@ xfs_acl_iaccess( STATIC int xfs_acl_allow_set( - struct inode *vp, + bhv_vnode_t *vp, int kind) { if (vp->i_flags & (S_IMMUTABLE|S_APPEND)) @@ -366,7 +372,7 @@ xfs_acl_allow_set( return ENOTDIR; if (vp->i_sb->s_flags & MS_RDONLY) return EROFS; - if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER)) + if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER)) return EPERM; return 0; } @@ -560,7 +566,7 @@ xfs_acl_get_endian( */ STATIC void xfs_acl_get_attr( - struct inode *vp, + bhv_vnode_t *vp, xfs_acl_t *aclp, int kind, int flags, @@ -570,7 +576,7 @@ xfs_acl_get_attr( ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1); flags |= ATTR_ROOT; - *error = xfs_attr_get(XFS_I(vp), + *error = xfs_attr_get(xfs_vtoi(vp), kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE : SGI_ACL_DEFAULT, (char *)aclp, &len, flags); @@ -584,7 +590,7 @@ xfs_acl_get_attr( */ STATIC void xfs_acl_set_attr( - struct inode *vp, + bhv_vnode_t *vp, xfs_acl_t *aclp, int kind, int *error) @@ -609,7 +615,7 @@ xfs_acl_set_attr( INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm); } INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); - *error = xfs_attr_set(XFS_I(vp), + *error = xfs_attr_set(xfs_vtoi(vp), kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE: SGI_ACL_DEFAULT, (char *)newacl, len, ATTR_ROOT); @@ -618,7 +624,7 @@ xfs_acl_set_attr( int xfs_acl_vtoacl( - struct inode *vp, + bhv_vnode_t *vp, xfs_acl_t *access_acl, xfs_acl_t *default_acl) { @@ -633,7 +639,7 @@ xfs_acl_vtoacl( if (error) access_acl->acl_cnt = XFS_ACL_NOT_PRESENT; else /* We have a good ACL and the file mode, synchronize. */ - xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl); + xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl); } if (default_acl) { @@ -650,7 +656,7 @@ xfs_acl_vtoacl( */ int xfs_acl_inherit( - struct inode *vp, + bhv_vnode_t *vp, mode_t mode, xfs_acl_t *pdaclp) { @@ -709,7 +715,7 @@ xfs_acl_inherit( */ STATIC int xfs_acl_setmode( - struct inode *vp, + bhv_vnode_t *vp, xfs_acl_t *acl, int *basicperms) { @@ -728,7 +734,7 @@ xfs_acl_setmode( * mode. The m:: bits take precedence over the g:: bits. 
*/ iattr.ia_valid = ATTR_MODE; - iattr.ia_mode = XFS_I(vp)->i_d.di_mode; + iattr.ia_mode = xfs_vtoi(vp)->i_d.di_mode; iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); ap = acl->acl_entry; for (i = 0; i < acl->acl_cnt; ++i) { @@ -758,7 +764,7 @@ xfs_acl_setmode( if (gap && nomask) iattr.ia_mode |= gap->ae_perm << 3; - return xfs_setattr(XFS_I(vp), &iattr, 0, sys_cred); + return xfs_setattr(xfs_vtoi(vp), &iattr, 0, sys_cred); } /* diff --git a/trunk/fs/xfs/xfs_acl.h b/trunk/fs/xfs/xfs_acl.h index a4e293b93efa..323ee94cf831 100644 --- a/trunk/fs/xfs/xfs_acl.h +++ b/trunk/fs/xfs/xfs_acl.h @@ -59,14 +59,14 @@ extern struct kmem_zone *xfs_acl_zone; (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name)) #define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone) -extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *); +extern int xfs_acl_inherit(bhv_vnode_t *, mode_t mode, xfs_acl_t *); extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); -extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *); -extern int xfs_acl_vhasacl_access(struct inode *); -extern int xfs_acl_vhasacl_default(struct inode *); -extern int xfs_acl_vset(struct inode *, void *, size_t, int); -extern int xfs_acl_vget(struct inode *, void *, size_t, int); -extern int xfs_acl_vremove(struct inode *, int); +extern int xfs_acl_vtoacl(bhv_vnode_t *, xfs_acl_t *, xfs_acl_t *); +extern int xfs_acl_vhasacl_access(bhv_vnode_t *); +extern int xfs_acl_vhasacl_default(bhv_vnode_t *); +extern int xfs_acl_vset(bhv_vnode_t *, void *, size_t, int); +extern int xfs_acl_vget(bhv_vnode_t *, void *, size_t, int); +extern int xfs_acl_vremove(bhv_vnode_t *, int); #define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) diff --git a/trunk/fs/xfs/xfs_arch.h b/trunk/fs/xfs/xfs_arch.h index 0b3b5efe848c..f9472a2076d4 100644 --- a/trunk/fs/xfs/xfs_arch.h +++ b/trunk/fs/xfs/xfs_arch.h @@ -92,6 +92,16 @@ ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ } +/* define generic INT_ macros */ + +#define INT_GET(reference,arch) \ + (((arch) == ARCH_NOCONVERT) \ + ? \ + (reference) \ + : \ + INT_SWAP((reference),(reference)) \ + ) + /* does not return a value */ #define INT_SET(reference,arch,valueref) \ (__builtin_constant_p(valueref) ? \ @@ -102,6 +112,64 @@ ) \ ) +/* does not return a value */ +#define INT_MOD_EXPR(reference,arch,code) \ + (((arch) == ARCH_NOCONVERT) \ + ? \ + (void)((reference) code) \ + : \ + (void)( \ + (reference) = INT_GET((reference),arch) , \ + ((reference) code), \ + INT_SET(reference, arch, reference) \ + ) \ + ) + +/* does not return a value */ +#define INT_MOD(reference,arch,delta) \ + (void)( \ + INT_MOD_EXPR(reference,arch,+=(delta)) \ + ) + +/* + * INT_COPY - copy a value between two locations with the + * _same architecture_ but _potentially different sizes_ + * + * if the types of the two parameters are equal or they are + * in native architecture, a simple copy is done + * + * otherwise, architecture conversions are done + * + */ + +/* does not return a value */ +#define INT_COPY(dst,src,arch) \ + ( \ + ((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \ + ? 
\ + (void)((dst) = (src)) \ + : \ + INT_SET(dst, arch, INT_GET(src, arch)) \ + ) + +/* + * INT_XLATE - copy a value in either direction between two locations + * with different architectures + * + * dir < 0 - copy from memory to buffer (native to arch) + * dir > 0 - copy from buffer to memory (arch to native) + */ + +/* does not return a value */ +#define INT_XLATE(buf,mem,dir,arch) {\ + ASSERT(dir); \ + if (dir>0) { \ + (mem)=INT_GET(buf, arch); \ + } else { \ + INT_SET(buf, arch, mem); \ + } \ +} + /* * In directories inode numbers are stored as unaligned arrays of unsigned * 8bit integers on disk. diff --git a/trunk/fs/xfs/xfs_attr.c b/trunk/fs/xfs/xfs_attr.c index f7cdc28aff41..78de80e3caa2 100644 --- a/trunk/fs/xfs/xfs_attr.c +++ b/trunk/fs/xfs/xfs_attr.c @@ -194,46 +194,6 @@ xfs_attr_get( return(error); } -/* - * Calculate how many blocks we need for the new attribute, - */ -int -xfs_attr_calc_size( - struct xfs_inode *ip, - int namelen, - int valuelen, - int *local) -{ - struct xfs_mount *mp = ip->i_mount; - int size; - int nblks; - - /* - * Determine space new attribute will use, and if it would be - * "local" or "remote" (note: local != inline). - */ - size = xfs_attr_leaf_newentsize(namelen, valuelen, - mp->m_sb.sb_blocksize, local); - - nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); - if (*local) { - if (size > (mp->m_sb.sb_blocksize >> 1)) { - /* Double split possible */ - nblks *= 2; - } - } else { - /* - * Out of line attribute, cannot double split, but - * make room for the attribute value itself. - */ - uint dblocks = XFS_B_TO_FSB(mp, valuelen); - nblks += dblocks; - nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); - } - - return nblks; -} - STATIC int xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, char *value, int valuelen, int flags) @@ -242,9 +202,10 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, xfs_fsblock_t firstblock; xfs_bmap_free_t flist; int error, err2, committed; + int local, size; + uint nblks; xfs_mount_t *mp = dp->i_mount; int rsvd = (flags & ATTR_ROOT) != 0; - int local; /* * Attach the dquots to the inode. @@ -280,8 +241,30 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, args.whichfork = XFS_ATTR_FORK; args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT; + /* + * Determine space new attribute will use, and if it would be + * "local" or "remote" (note: local != inline). + */ + size = xfs_attr_leaf_newentsize(name->len, valuelen, + mp->m_sb.sb_blocksize, &local); + + nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); + if (local) { + if (size > (mp->m_sb.sb_blocksize >> 1)) { + /* Double split possible */ + nblks <<= 1; + } + } else { + uint dblocks = XFS_B_TO_FSB(mp, valuelen); + /* Out of line attribute, cannot double split, but make + * room for the attribute value itself. + */ + nblks += dblocks; + nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); + } + /* Size is now blocks for attribute data */ - args.total = xfs_attr_calc_size(dp, name->len, valuelen, &local); + args.total = nblks; /* * Start our first transaction of the day. 
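The INT_GET()/INT_SET() family restored in the xfs_arch.h hunk above converts on-disk fields between the filesystem's byte order and the CPU's native order. A minimal userspace illustration of the same idea, assuming the glibc/BSD htobe32()/be32toh() helpers from <endian.h> and invented variable names:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ondisk_cnt = htobe32(7);          /* store: like INT_SET() */
        uint32_t native_cnt = be32toh(ondisk_cnt); /* load:  like INT_GET() */

        printf("on disk: 0x%08x  native: %u\n",
               (unsigned)ondisk_cnt, (unsigned)native_cnt);
        return 0;
}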
@@ -303,17 +286,18 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, if (rsvd) args.trans->t_flags |= XFS_TRANS_RESERVE; - if ((error = xfs_trans_reserve(args.trans, args.total, - XFS_ATTRSET_LOG_RES(mp, args.total), 0, - XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) { + if ((error = xfs_trans_reserve(args.trans, (uint) nblks, + XFS_ATTRSET_LOG_RES(mp, nblks), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ATTRSET_LOG_COUNT))) { xfs_trans_cancel(args.trans, 0); return(error); } xfs_ilock(dp, XFS_ILOCK_EXCL); - error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0, - rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : - XFS_QMOPT_RES_REGBLKS); + error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0, + rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : + XFS_QMOPT_RES_REGBLKS); if (error) { xfs_iunlock(dp, XFS_ILOCK_EXCL); xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES); @@ -400,9 +384,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, * Commit the leaf transformation. We'll need another (linked) * transaction to add the new attribute to the leaf. */ - - error = xfs_trans_roll(&args.trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args.trans, dp))) goto out; } @@ -982,8 +964,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) * Commit the current trans (including the inode) and start * a new one. */ - error = xfs_trans_roll(&args->trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, dp))) return (error); /* @@ -997,8 +978,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) * Commit the transaction that added the attr name so that * later routines can manage their own transactions. */ - error = xfs_trans_roll(&args->trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, dp))) return (error); /* @@ -1087,7 +1067,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) /* * Commit the remove and start the next trans in series. */ - error = xfs_trans_roll(&args->trans, dp); + error = xfs_attr_rolltrans(&args->trans, dp); } else if (args->rmtblkno > 0) { /* @@ -1318,8 +1298,7 @@ xfs_attr_node_addname(xfs_da_args_t *args) * Commit the node conversion and start the next * trans in the chain. */ - error = xfs_trans_roll(&args->trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, dp))) goto out; goto restart; @@ -1370,8 +1349,7 @@ xfs_attr_node_addname(xfs_da_args_t *args) * Commit the leaf addition or btree split and start the next * trans in the chain. */ - error = xfs_trans_roll(&args->trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, dp))) goto out; /* @@ -1471,8 +1449,7 @@ xfs_attr_node_addname(xfs_da_args_t *args) /* * Commit and start the next trans in the chain. */ - error = xfs_trans_roll(&args->trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, dp))) goto out; } else if (args->rmtblkno > 0) { @@ -1604,8 +1581,7 @@ xfs_attr_node_removename(xfs_da_args_t *args) /* * Commit the Btree join operation and start a new trans. */ - error = xfs_trans_roll(&args->trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, dp))) goto out; } @@ -2106,8 +2082,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) /* * Start the next trans in the chain. */ - error = xfs_trans_roll(&args->trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, dp))) return (error); } @@ -2257,8 +2232,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) /* * Close out trans and start the next one in the chain. 
*/ - error = xfs_trans_roll(&args->trans, args->dp); - if (error) + if ((error = xfs_attr_rolltrans(&args->trans, args->dp))) return (error); } return(0); diff --git a/trunk/fs/xfs/xfs_attr.h b/trunk/fs/xfs/xfs_attr.h index fb3b2a68b9b9..8b2d31c19e4d 100644 --- a/trunk/fs/xfs/xfs_attr.h +++ b/trunk/fs/xfs/xfs_attr.h @@ -129,7 +129,6 @@ typedef struct xfs_attr_list_context { /* * Overall external interface routines. */ -int xfs_attr_calc_size(struct xfs_inode *, int, int, int *); int xfs_attr_inactive(struct xfs_inode *dp); int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int); int xfs_attr_rmtval_get(struct xfs_da_args *args); diff --git a/trunk/fs/xfs/xfs_attr_leaf.c b/trunk/fs/xfs/xfs_attr_leaf.c index 79da6b2ea99e..23ef5d7c87e1 100644 --- a/trunk/fs/xfs/xfs_attr_leaf.c +++ b/trunk/fs/xfs/xfs_attr_leaf.c @@ -2498,7 +2498,9 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) /* * Commit the flag value change and start the next trans in series. */ - return xfs_trans_roll(&args->trans, args->dp); + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); } /* @@ -2545,7 +2547,9 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args) /* * Commit the flag value change and start the next trans in series. */ - return xfs_trans_roll(&args->trans, args->dp); + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); } /* @@ -2661,7 +2665,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) /* * Commit the flag value change and start the next trans in series. */ - error = xfs_trans_roll(&args->trans, args->dp); + error = xfs_attr_rolltrans(&args->trans, args->dp); return(error); } @@ -2719,7 +2723,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) /* * Commit the invalidate and start the next transaction. */ - error = xfs_trans_roll(trans, dp); + error = xfs_attr_rolltrans(trans, dp); return (error); } @@ -2821,8 +2825,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, /* * Atomically commit the whole invalidate stuff. */ - error = xfs_trans_roll(trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(trans, dp))) return (error); } @@ -2961,8 +2964,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, /* * Roll to next transaction. */ - error = xfs_trans_roll(trans, dp); - if (error) + if ((error = xfs_attr_rolltrans(trans, dp))) return (error); } @@ -2972,3 +2974,60 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, return(0); } + + +/* + * Roll from one trans in the sequence of PERMANENT transactions to the next. + */ +int +xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp) +{ + xfs_trans_t *trans; + unsigned int logres, count; + int error; + + /* + * Ensure that the inode is always logged. + */ + trans = *transp; + xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE); + + /* + * Copy the critical parameters from one trans to the next. + */ + logres = trans->t_log_res; + count = trans->t_log_count; + *transp = xfs_trans_dup(trans); + + /* + * Commit the current transaction. + * If this commit failed, then it'd just unlock those items that + * are not marked ihold. That also means that a filesystem shutdown + * is in progress. The caller takes the responsibility to cancel + * the duplicate transaction that gets returned. + */ + if ((error = xfs_trans_commit(trans, 0))) + return (error); + + trans = *transp; + + /* + * Reserve space in the log for th next transaction. 
+ * This also pushes items in the "AIL", the list of logged items, + * out to disk if they are taking up space at the tail of the log + * that we want to use. This requires that either nothing be locked + * across this call, or that anything that is locked be logged in + * the prior and the next transactions. + */ + error = xfs_trans_reserve(trans, 0, logres, 0, + XFS_TRANS_PERM_LOG_RES, count); + /* + * Ensure that the inode is in the new transaction and locked. + */ + if (!error) { + xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(trans, dp); + } + return (error); + +} diff --git a/trunk/fs/xfs/xfs_attr_leaf.h b/trunk/fs/xfs/xfs_attr_leaf.h index 83e9af417ca2..5ecf437b7825 100644 --- a/trunk/fs/xfs/xfs_attr_leaf.h +++ b/trunk/fs/xfs/xfs_attr_leaf.h @@ -274,4 +274,6 @@ int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp, struct xfs_dabuf *leaf2_bp); int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local); +int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp); + #endif /* __XFS_ATTR_LEAF_H__ */ diff --git a/trunk/fs/xfs/xfs_bit.c b/trunk/fs/xfs/xfs_bit.c index 48228848f5ae..fab0b6d5a41b 100644 --- a/trunk/fs/xfs/xfs_bit.c +++ b/trunk/fs/xfs/xfs_bit.c @@ -25,6 +25,109 @@ * XFS bit manipulation routines, used in non-realtime code. */ +#ifndef HAVE_ARCH_HIGHBIT +/* + * Index of high bit number in byte, -1 for none set, 0..7 otherwise. + */ +static const char xfs_highbit[256] = { + -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */ + 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */ + 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */ + 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 47 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */ + 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */ + 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */ + 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */ + 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */ + 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */ +}; +#endif + +/* + * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set. + */ +inline int +xfs_highbit32( + __uint32_t v) +{ +#ifdef HAVE_ARCH_HIGHBIT + return highbit32(v); +#else + int i; + + if (v & 0xffff0000) + if (v & 0xff000000) + i = 24; + else + i = 16; + else if (v & 0x0000ffff) + if (v & 0x0000ff00) + i = 8; + else + i = 0; + else + return -1; + return i + xfs_highbit[(v >> i) & 0xff]; +#endif +} + +/* + * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set. 
+ */ +int +xfs_lowbit64( + __uint64_t v) +{ + __uint32_t w = (__uint32_t)v; + int n = 0; + + if (w) { /* lower bits */ + n = ffs(w); + } else { /* upper bits */ + w = (__uint32_t)(v >> 32); + if (w && (n = ffs(w))) + n += 32; + } + return n - 1; +} + +/* + * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set. + */ +int +xfs_highbit64( + __uint64_t v) +{ + __uint32_t h = (__uint32_t)(v >> 32); + + if (h) + return xfs_highbit32(h) + 32; + return xfs_highbit32((__uint32_t)v); +} + + /* * Return whether bitmap is empty. * Size is number of words in the bitmap, which is padded to word boundary diff --git a/trunk/fs/xfs/xfs_bit.h b/trunk/fs/xfs/xfs_bit.h index 8e0e463dae2d..082641a9782c 100644 --- a/trunk/fs/xfs/xfs_bit.h +++ b/trunk/fs/xfs/xfs_bit.h @@ -47,39 +47,13 @@ static inline __uint64_t xfs_mask64lo(int n) } /* Get high bit set out of 32-bit argument, -1 if none set */ -static inline int xfs_highbit32(__uint32_t v) -{ - return fls(v) - 1; -} - -/* Get high bit set out of 64-bit argument, -1 if none set */ -static inline int xfs_highbit64(__uint64_t v) -{ - return fls64(v) - 1; -} - -/* Get low bit set out of 32-bit argument, -1 if none set */ -static inline int xfs_lowbit32(__uint32_t v) -{ - unsigned long t = v; - return (v) ? find_first_bit(&t, 32) : -1; -} +extern int xfs_highbit32(__uint32_t v); /* Get low bit set out of 64-bit argument, -1 if none set */ -static inline int xfs_lowbit64(__uint64_t v) -{ - __uint32_t w = (__uint32_t)v; - int n = 0; +extern int xfs_lowbit64(__uint64_t v); - if (w) { /* lower bits */ - n = ffs(w); - } else { /* upper bits */ - w = (__uint32_t)(v >> 32); - if (w && (n = ffs(w))) - n += 32; - } - return n - 1; -} +/* Get high bit set out of 64-bit argument, -1 if none set */ +extern int xfs_highbit64(__uint64_t); /* Return whether bitmap is empty (1 == empty) */ extern int xfs_bitmap_empty(uint *map, uint size); diff --git a/trunk/fs/xfs/xfs_bmap.c b/trunk/fs/xfs/xfs_bmap.c index a1aab9275d5a..3c4beb3a4326 100644 --- a/trunk/fs/xfs/xfs_bmap.c +++ b/trunk/fs/xfs/xfs_bmap.c @@ -384,14 +384,14 @@ xfs_bmap_count_tree( int levelin, int *count); -STATIC void +STATIC int xfs_bmap_count_leaves( xfs_ifork_t *ifp, xfs_extnum_t idx, int numrecs, int *count); -STATIC void +STATIC int xfs_bmap_disk_count_leaves( xfs_extnum_t idx, xfs_bmbt_block_t *block, @@ -4000,7 +4000,7 @@ xfs_bmap_add_attrfork( ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; } ASSERT(ip->i_d.di_anextents == 0); - IHOLD(ip); + VN_HOLD(XFS_ITOV(ip)); xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); switch (ip->i_d.di_format) { @@ -6096,7 +6096,7 @@ xfs_bmap_get_bp( tp = cur->bc_tp; licp = &tp->t_items; while (!bp && licp != NULL) { - if (xfs_lic_are_all_free(licp)) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { licp = licp->lic_next; continue; } @@ -6106,11 +6106,11 @@ xfs_bmap_get_bp( xfs_buf_log_item_t *bip; xfs_buf_t *lbp; - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } - lidp = xfs_lic_slot(licp, i); + lidp = XFS_LIC_SLOT(licp, i); lip = lidp->lid_item; if (lip->li_type != XFS_LI_BUF) continue; @@ -6367,9 +6367,13 @@ xfs_bmap_count_blocks( mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { - xfs_bmap_count_leaves(ifp, 0, + if (unlikely(xfs_bmap_count_leaves(ifp, 0, ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), - count); + count) < 0)) { + XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } return 0; 
} @@ -6450,7 +6454,13 @@ xfs_bmap_count_tree( for (;;) { nextbno = be64_to_cpu(block->bb_rightsib); numrecs = be16_to_cpu(block->bb_numrecs); - xfs_bmap_disk_count_leaves(0, block, numrecs, count); + if (unlikely(xfs_bmap_disk_count_leaves(0, + block, numrecs, count) < 0)) { + xfs_trans_brelse(tp, bp); + XFS_ERROR_REPORT("xfs_bmap_count_tree(2)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } xfs_trans_brelse(tp, bp); if (nextbno == NULLFSBLOCK) break; @@ -6468,7 +6478,7 @@ xfs_bmap_count_tree( /* * Count leaf blocks given a range of extent records. */ -STATIC void +STATIC int xfs_bmap_count_leaves( xfs_ifork_t *ifp, xfs_extnum_t idx, @@ -6481,13 +6491,14 @@ xfs_bmap_count_leaves( xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b); *count += xfs_bmbt_get_blockcount(frp); } + return 0; } /* * Count leaf blocks given a range of extent records originally * in btree format. */ -STATIC void +STATIC int xfs_bmap_disk_count_leaves( xfs_extnum_t idx, xfs_bmbt_block_t *block, @@ -6501,4 +6512,5 @@ xfs_bmap_disk_count_leaves( frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b); *count += xfs_bmbt_disk_get_blockcount(frp); } + return 0; } diff --git a/trunk/fs/xfs/xfs_btree.c b/trunk/fs/xfs/xfs_btree.c index cc593a84c345..aeb87ca69fcc 100644 --- a/trunk/fs/xfs/xfs_btree.c +++ b/trunk/fs/xfs/xfs_btree.c @@ -46,10 +46,37 @@ kmem_zone_t *xfs_btree_cur_zone; /* * Btree magic numbers. */ -const __uint32_t xfs_magics[XFS_BTNUM_MAX] = { +const __uint32_t xfs_magics[XFS_BTNUM_MAX] = +{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC }; +/* + * Prototypes for internal routines. + */ + +/* + * Checking routine: return maxrecs for the block. + */ +STATIC int /* number of records fitting in block */ +xfs_btree_maxrecs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block);/* generic btree block pointer */ + +/* + * Internal routines. + */ + +/* + * Retrieve the block pointer from the cursor at the given level. + * This may be a bmap btree root or from a buffer. + */ +STATIC xfs_btree_block_t * /* generic btree block pointer */ +xfs_btree_get_block( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree */ + struct xfs_buf **bpp); /* buffer containing the block */ + /* * Checking routine: return maxrecs for the block. */ @@ -429,6 +456,35 @@ xfs_btree_dup_cursor( return 0; } +/* + * Change the cursor to point to the first record at the given level. + * Other levels are unaffected. + */ +int /* success=1, failure=0 */ +xfs_btree_firstrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level) /* level to change */ +{ + xfs_btree_block_t *block; /* generic btree block pointer */ + xfs_buf_t *bp; /* buffer containing block */ + + /* + * Get the block pointer for this level. + */ + block = xfs_btree_get_block(cur, level, &bp); + xfs_btree_check_block(cur, block, level, bp); + /* + * It's empty, there is no such record. + */ + if (!block->bb_h.bb_numrecs) + return 0; + /* + * Set the ptr value to 1, that's the first record/key. + */ + cur->bc_ptrs[level] = 1; + return 1; +} + /* * Retrieve the block pointer from the cursor at the given level. * This may be a bmap btree root or from a buffer. @@ -570,13 +626,6 @@ xfs_btree_init_cursor( cur->bc_private.a.agbp = agbp; cur->bc_private.a.agno = agno; break; - case XFS_BTNUM_INO: - /* - * Inode allocation btree fields. - */ - cur->bc_private.a.agbp = agbp; - cur->bc_private.a.agno = agno; - break; case XFS_BTNUM_BMAP: /* * Bmap btree fields. 
@@ -589,6 +638,13 @@ xfs_btree_init_cursor( cur->bc_private.b.flags = 0; cur->bc_private.b.whichfork = whichfork; break; + case XFS_BTNUM_INO: + /* + * Inode allocation btree fields. + */ + cur->bc_private.i.agbp = agbp; + cur->bc_private.i.agno = agno; + break; default: ASSERT(0); } @@ -614,35 +670,6 @@ xfs_btree_islastblock( return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK; } -/* - * Change the cursor to point to the first record at the given level. - * Other levels are unaffected. - */ -int /* success=1, failure=0 */ -xfs_btree_firstrec( - xfs_btree_cur_t *cur, /* btree cursor */ - int level) /* level to change */ -{ - xfs_btree_block_t *block; /* generic btree block pointer */ - xfs_buf_t *bp; /* buffer containing block */ - - /* - * Get the block pointer for this level. - */ - block = xfs_btree_get_block(cur, level, &bp); - xfs_btree_check_block(cur, block, level, bp); - /* - * It's empty, there is no such record. - */ - if (!block->bb_h.bb_numrecs) - return 0; - /* - * Set the ptr value to 1, that's the first record/key. - */ - cur->bc_ptrs[level] = 1; - return 1; -} - /* * Change the cursor to point to the last record in the current block * at the given level. Other levels are unaffected. @@ -863,12 +890,12 @@ xfs_btree_readahead_core( case XFS_BTNUM_INO: i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) { - xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, be32_to_cpu(i->bb_leftsib), 1); rval++; } if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) { - xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, be32_to_cpu(i->bb_rightsib), 1); rval++; } diff --git a/trunk/fs/xfs/xfs_btree.h b/trunk/fs/xfs/xfs_btree.h index 1f528a2a3754..7440b78f9cec 100644 --- a/trunk/fs/xfs/xfs_btree.h +++ b/trunk/fs/xfs/xfs_btree.h @@ -158,8 +158,8 @@ typedef struct xfs_btree_cur __uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */ xfs_btnum_t bc_btnum; /* identifies which btree type */ union { - struct { /* needed for BNO, CNT, INO */ - struct xfs_buf *agbp; /* agf/agi buffer pointer */ + struct { /* needed for BNO, CNT */ + struct xfs_buf *agbp; /* agf buffer pointer */ xfs_agnumber_t agno; /* ag number */ } a; struct { /* needed for BMAP */ @@ -172,6 +172,10 @@ typedef struct xfs_btree_cur char flags; /* flags */ #define XFS_BTCUR_BPRV_WASDEL 1 /* was delayed */ } b; + struct { /* needed for INO */ + struct xfs_buf *agbp; /* agi buffer pointer */ + xfs_agnumber_t agno; /* ag number */ + } i; } bc_private; /* per-btree type data */ } xfs_btree_cur_t; diff --git a/trunk/fs/xfs/xfs_buf_item.c b/trunk/fs/xfs/xfs_buf_item.c index 608c30c3f76b..d86ca2c03a70 100644 --- a/trunk/fs/xfs/xfs_buf_item.c +++ b/trunk/fs/xfs/xfs_buf_item.c @@ -737,7 +737,7 @@ xfs_buf_item_init( bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); bip->bli_format.blf_map_size = map_size; #ifdef XFS_BLI_TRACE - bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS); + bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP); #endif #ifdef XFS_TRANS_DEBUG @@ -1056,7 +1056,7 @@ xfs_buf_iodone_callbacks( anyway. 
*/ XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse); XFS_BUF_DONE(bp); - XFS_BUF_FINISH_IOWAIT(bp); + XFS_BUF_V_IODONESEMA(bp); } return; } diff --git a/trunk/fs/xfs/xfs_dfrag.c b/trunk/fs/xfs/xfs_dfrag.c index 760f4c5b5160..2211e885ef24 100644 --- a/trunk/fs/xfs/xfs_dfrag.c +++ b/trunk/fs/xfs/xfs_dfrag.c @@ -128,8 +128,10 @@ xfs_swap_extents( xfs_swapext_t *sxp) { xfs_mount_t *mp; + xfs_inode_t *ips[2]; xfs_trans_t *tp; xfs_bstat_t *sbp = &sxp->sx_stat; + bhv_vnode_t *vp, *tvp; xfs_ifork_t *tempifp, *ifp, *tifp; int ilf_fields, tilf_fields; static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; @@ -148,8 +150,19 @@ xfs_swap_extents( } sbp = &sxp->sx_stat; + vp = XFS_ITOV(ip); + tvp = XFS_ITOV(tip); + + /* Lock in i_ino order */ + if (ip->i_ino < tip->i_ino) { + ips[0] = ip; + ips[1] = tip; + } else { + ips[0] = tip; + ips[1] = ip; + } - xfs_lock_two_inodes(ip, tip, lock_flags); + xfs_lock_inodes(ips, 2, lock_flags); locked = 1; /* Verify that both files have the same format */ @@ -171,7 +184,7 @@ xfs_swap_extents( goto error0; } - if (VN_CACHED(VFS_I(tip)) != 0) { + if (VN_CACHED(tvp) != 0) { xfs_inval_cached_trace(tip, 0, -1, 0, -1); error = xfs_flushinval_pages(tip, 0, -1, FI_REMAPF_LOCKED); @@ -180,7 +193,7 @@ xfs_swap_extents( } /* Verify O_DIRECT for ftmp */ - if (VN_CACHED(VFS_I(tip)) != 0) { + if (VN_CACHED(tvp) != 0) { error = XFS_ERROR(EINVAL); goto error0; } @@ -224,7 +237,7 @@ xfs_swap_extents( * vop_read (or write in the case of autogrow) they block on the iolock * until we have switched the extents. */ - if (VN_MAPPED(VFS_I(ip))) { + if (VN_MAPPED(vp)) { error = XFS_ERROR(EBUSY); goto error0; } @@ -252,7 +265,7 @@ xfs_swap_extents( locked = 0; goto error0; } - xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); + xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); /* * Count the number of extended attribute blocks @@ -337,11 +350,15 @@ xfs_swap_extents( break; } + /* + * Increment vnode ref counts since xfs_trans_commit & + * xfs_trans_cancel will both unlock the inodes and + * decrement the associated ref counts. 
+ */ + VN_HOLD(vp); + VN_HOLD(tvp); - IHOLD(ip); xfs_trans_ijoin(tp, ip, lock_flags); - - IHOLD(tip); xfs_trans_ijoin(tp, tip, lock_flags); xfs_trans_log_inode(tp, ip, ilf_fields); diff --git a/trunk/fs/xfs/xfs_error.c b/trunk/fs/xfs/xfs_error.c index f227ecd1a294..f66756cfb5e8 100644 --- a/trunk/fs/xfs/xfs_error.c +++ b/trunk/fs/xfs/xfs_error.c @@ -58,6 +58,9 @@ xfs_error_trap(int e) } return e; } +#endif + +#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) int xfs_etest[XFS_NUM_INJECT_ERROR]; int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; @@ -151,7 +154,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud) return 0; } -#endif /* DEBUG */ +#endif /* DEBUG || INDUCE_IO_ERROR */ static void xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap) diff --git a/trunk/fs/xfs/xfs_error.h b/trunk/fs/xfs/xfs_error.h index 11543f10b0c6..d8559d132efa 100644 --- a/trunk/fs/xfs/xfs_error.h +++ b/trunk/fs/xfs/xfs_error.h @@ -125,14 +125,22 @@ extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp, #define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10) #define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT -#ifdef DEBUG +#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); #define XFS_NUM_INJECT_ERROR 10 + +#ifdef __ANSI_CPP__ +#define XFS_TEST_ERROR(expr, mp, tag, rf) \ + ((expr) || \ + xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \ + (rf))) +#else #define XFS_TEST_ERROR(expr, mp, tag, rf) \ ((expr) || \ xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ (rf))) +#endif /* __ANSI_CPP__ */ extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); @@ -140,7 +148,7 @@ extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); #define XFS_TEST_ERROR(expr, mp, tag, rf) (expr) #define xfs_errortag_add(tag, mp) (ENOSYS) #define xfs_errortag_clearall(mp, loud) (ENOSYS) -#endif /* DEBUG */ +#endif /* (DEBUG || INDUCE_IO_ERROR) */ /* * XFS panic tags -- allow a call to xfs_cmn_err() be turned into diff --git a/trunk/fs/xfs/xfs_filestream.c b/trunk/fs/xfs/xfs_filestream.c index f3bb75da384e..c38fd14fca29 100644 --- a/trunk/fs/xfs/xfs_filestream.c +++ b/trunk/fs/xfs/xfs_filestream.c @@ -400,7 +400,7 @@ xfs_filestream_init(void) if (!item_zone) return -ENOMEM; #ifdef XFS_FILESTREAMS_TRACE - xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS); + xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP); #endif return 0; } diff --git a/trunk/fs/xfs/xfs_ialloc_btree.c b/trunk/fs/xfs/xfs_ialloc_btree.c index 83502f3edef0..e5310c90e50f 100644 --- a/trunk/fs/xfs/xfs_ialloc_btree.c +++ b/trunk/fs/xfs/xfs_ialloc_btree.c @@ -181,7 +181,7 @@ xfs_inobt_delrec( * then we can get rid of this level. */ if (numrecs == 1 && level > 0) { - agbp = cur->bc_private.a.agbp; + agbp = cur->bc_private.i.agbp; agi = XFS_BUF_TO_AGI(agbp); /* * pp is still set to the first pointer in the block. @@ -194,7 +194,7 @@ xfs_inobt_delrec( * Free the block. 
*/ if ((error = xfs_free_extent(cur->bc_tp, - XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, bno), 1))) + XFS_AGB_TO_FSB(mp, cur->bc_private.i.agno, bno), 1))) return error; xfs_trans_binval(cur->bc_tp, bp); xfs_ialloc_log_agi(cur->bc_tp, agbp, @@ -379,7 +379,7 @@ xfs_inobt_delrec( rrecs = be16_to_cpu(right->bb_numrecs); rbp = bp; if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, lbno, 0, &lbp, + cur->bc_private.i.agno, lbno, 0, &lbp, XFS_INO_BTREE_REF))) return error; left = XFS_BUF_TO_INOBT_BLOCK(lbp); @@ -401,7 +401,7 @@ xfs_inobt_delrec( lrecs = be16_to_cpu(left->bb_numrecs); lbp = bp; if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, rbno, 0, &rbp, + cur->bc_private.i.agno, rbno, 0, &rbp, XFS_INO_BTREE_REF))) return error; right = XFS_BUF_TO_INOBT_BLOCK(rbp); @@ -484,7 +484,7 @@ xfs_inobt_delrec( xfs_buf_t *rrbp; if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, + cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0, &rrbp, XFS_INO_BTREE_REF))) return error; rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); @@ -497,7 +497,7 @@ xfs_inobt_delrec( * Free the deleting block. */ if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp, - cur->bc_private.a.agno, rbno), 1))) + cur->bc_private.i.agno, rbno), 1))) return error; xfs_trans_binval(cur->bc_tp, rbp); /* @@ -854,7 +854,7 @@ xfs_inobt_lookup( { xfs_agi_t *agi; /* a.g. inode header */ - agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); + agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); agno = be32_to_cpu(agi->agi_seqno); agbno = be32_to_cpu(agi->agi_root); } @@ -1089,7 +1089,7 @@ xfs_inobt_lshift( * Set up the left neighbor as "left". */ if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), + cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib), 0, &lbp, XFS_INO_BTREE_REF))) return error; left = XFS_BUF_TO_INOBT_BLOCK(lbp); @@ -1207,10 +1207,10 @@ xfs_inobt_newroot( /* * Get a block & a buffer. */ - agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); + agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); args.tp = cur->bc_tp; args.mp = cur->bc_mp; - args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, + args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, be32_to_cpu(agi->agi_root)); args.mod = args.minleft = args.alignment = args.total = args.wasdel = args.isfl = args.userdata = args.minalignslop = 0; @@ -1233,7 +1233,7 @@ xfs_inobt_newroot( */ agi->agi_root = cpu_to_be32(args.agbno); be32_add_cpu(&agi->agi_level, 1); - xfs_ialloc_log_agi(args.tp, cur->bc_private.a.agbp, + xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL); /* * At the previous root level there are now two blocks: the old @@ -1376,7 +1376,7 @@ xfs_inobt_rshift( * Set up the right neighbor as "right". */ if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), + cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0, &rbp, XFS_INO_BTREE_REF))) return error; right = XFS_BUF_TO_INOBT_BLOCK(rbp); @@ -1492,7 +1492,7 @@ xfs_inobt_split( * Allocate the new block. * If we can't do it, we're toast. Give up. 
*/ - args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, lbno); + args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno); args.mod = args.minleft = args.alignment = args.total = args.wasdel = args.isfl = args.userdata = args.minalignslop = 0; args.minlen = args.maxlen = args.prod = 1; @@ -1725,7 +1725,7 @@ xfs_inobt_decrement( agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, agbno, 0, &bp, + cur->bc_private.i.agno, agbno, 0, &bp, XFS_INO_BTREE_REF))) return error; lev--; @@ -1897,7 +1897,7 @@ xfs_inobt_increment( agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, agbno, 0, &bp, + cur->bc_private.i.agno, agbno, 0, &bp, XFS_INO_BTREE_REF))) return error; lev--; diff --git a/trunk/fs/xfs/xfs_iget.c b/trunk/fs/xfs/xfs_iget.c index e229e9e001c2..b07604b94d9f 100644 --- a/trunk/fs/xfs/xfs_iget.c +++ b/trunk/fs/xfs/xfs_iget.c @@ -216,14 +216,7 @@ xfs_iget_core( mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); init_waitqueue_head(&ip->i_ipin_wait); atomic_set(&ip->i_pincount, 0); - - /* - * Because we want to use a counting completion, complete - * the flush completion once to allow a single access to - * the flush completion without blocking. - */ - init_completion(&ip->i_flush); - complete(&ip->i_flush); + initnsema(&ip->i_flock, 1, "xfsfino"); if (lock_flags) xfs_ilock(ip, lock_flags); @@ -294,18 +287,11 @@ xfs_iget_core( xfs_iflags_set(ip, XFS_IMODIFIED); *ipp = ip; - /* - * Set up the Linux with the Linux inode. - */ - ip->i_vnode = inode; - inode->i_private = ip; - /* * If we have a real type for an on-disk inode, we can set ops(&unlock) * now. If it's a new inode being created, xfs_ialloc will handle it. */ - if (ip->i_d.di_mode != 0) - xfs_setup_inode(ip); + xfs_initialize_vnode(mp, inode, ip); return 0; } @@ -425,11 +411,10 @@ xfs_iput(xfs_inode_t *ip, * Special iput for brand-new inodes that are still locked */ void -xfs_iput_new( - xfs_inode_t *ip, - uint lock_flags) +xfs_iput_new(xfs_inode_t *ip, + uint lock_flags) { - struct inode *inode = VFS_I(ip); + struct inode *inode = ip->i_vnode; xfs_itrace_entry(ip); @@ -790,3 +775,26 @@ xfs_isilocked( } #endif +/* + * The following three routines simply manage the i_flock + * semaphore embedded in the inode. This semaphore synchronizes + * processes attempting to flush the in-core inode back to disk. + */ +void +xfs_iflock(xfs_inode_t *ip) +{ + psema(&(ip->i_flock), PINOD|PLTWAIT); +} + +int +xfs_iflock_nowait(xfs_inode_t *ip) +{ + return (cpsema(&(ip->i_flock))); +} + +void +xfs_ifunlock(xfs_inode_t *ip) +{ + ASSERT(issemalocked(&(ip->i_flock))); + vsema(&(ip->i_flock)); +} diff --git a/trunk/fs/xfs/xfs_inode.c b/trunk/fs/xfs/xfs_inode.c index 00e80df9dd9d..bedc66163176 100644 --- a/trunk/fs/xfs/xfs_inode.c +++ b/trunk/fs/xfs/xfs_inode.c @@ -580,8 +580,8 @@ xfs_iformat_extents( xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip)); for (i = 0; i < nex; i++, dp++) { xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); - ep->l0 = get_unaligned_be64(&dp->l0); - ep->l1 = get_unaligned_be64(&dp->l1); + ep->l0 = be64_to_cpu(get_unaligned(&dp->l0)); + ep->l1 = be64_to_cpu(get_unaligned(&dp->l1)); } XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); if (whichfork != XFS_DATA_FORK || @@ -835,22 +835,22 @@ xfs_iread( * Do this before xfs_iformat in case it adds entries. 
*/ #ifdef XFS_INODE_TRACE - ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS); + ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP); #endif #ifdef XFS_BMAP_TRACE - ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); + ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); #endif #ifdef XFS_BMBT_TRACE - ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); + ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP); #endif #ifdef XFS_RW_TRACE - ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS); + ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP); #endif #ifdef XFS_ILOCK_TRACE - ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS); + ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP); #endif #ifdef XFS_DIR2_TRACE - ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); + ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP); #endif /* @@ -1046,9 +1046,9 @@ xfs_ialloc( { xfs_ino_t ino; xfs_inode_t *ip; + bhv_vnode_t *vp; uint flags; int error; - timespec_t tv; /* * Call the space management code to pick @@ -1077,12 +1077,13 @@ xfs_ialloc( } ASSERT(ip != NULL); + vp = XFS_ITOV(ip); ip->i_d.di_mode = (__uint16_t)mode; ip->i_d.di_onlink = 0; ip->i_d.di_nlink = nlink; ASSERT(ip->i_d.di_nlink == nlink); - ip->i_d.di_uid = current_fsuid(); - ip->i_d.di_gid = current_fsgid(); + ip->i_d.di_uid = current_fsuid(cr); + ip->i_d.di_gid = current_fsgid(cr); ip->i_d.di_projid = prid; memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); @@ -1129,13 +1130,7 @@ xfs_ialloc( ip->i_size = 0; ip->i_d.di_nextents = 0; ASSERT(ip->i_d.di_nblocks == 0); - - nanotime(&tv); - ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; - ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; - ip->i_d.di_atime = ip->i_d.di_mtime; - ip->i_d.di_ctime = ip->i_d.di_mtime; - + xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD); /* * di_gen will have been taken care of in xfs_iread. */ @@ -1225,7 +1220,7 @@ xfs_ialloc( xfs_trans_log_inode(tp, ip, flags); /* now that we have an i_mode we can setup inode ops and unlock */ - xfs_setup_inode(ip); + xfs_initialize_vnode(tp->t_mountp, vp, ip); *ipp = ip; return 0; @@ -1404,6 +1399,7 @@ xfs_itruncate_start( xfs_fsize_t last_byte; xfs_off_t toss_start; xfs_mount_t *mp; + bhv_vnode_t *vp; int error = 0; ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); @@ -1412,6 +1408,7 @@ xfs_itruncate_start( (flags == XFS_ITRUNC_MAYBE)); mp = ip->i_mount; + vp = XFS_ITOV(ip); /* wait for the completion of any pending DIOs */ if (new_size < ip->i_size) @@ -1460,7 +1457,7 @@ xfs_itruncate_start( #ifdef DEBUG if (new_size == 0) { - ASSERT(VN_CACHED(VFS_I(ip)) == 0); + ASSERT(VN_CACHED(vp) == 0); } #endif return error; @@ -2633,6 +2630,7 @@ xfs_idestroy( xfs_idestroy_fork(ip, XFS_ATTR_FORK); mrfree(&ip->i_lock); mrfree(&ip->i_iolock); + freesema(&ip->i_flock); #ifdef XFS_INODE_TRACE ktrace_free(ip->i_trace); @@ -3050,10 +3048,10 @@ xfs_iflush_cluster( /* * xfs_iflush() will write a modified inode's changes out to the * inode's on disk home. The caller must have the inode lock held - * in at least shared mode and the inode flush completion must be - * active as well. The inode lock will still be held upon return from + * in at least shared mode and the inode flush semaphore must be + * held as well. The inode lock will still be held upon return from * the call and the caller is free to unlock it. - * The inode flush will be completed when the inode reaches the disk. 
+ * The inode flush lock will be unlocked when the inode reaches the disk. * The flags indicate how the inode's buffer should be written out. */ int @@ -3072,7 +3070,7 @@ xfs_iflush( XFS_STATS_INC(xs_iflush_count); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); - ASSERT(!completion_done(&ip->i_flush)); + ASSERT(issemalocked(&(ip->i_flock))); ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || ip->i_d.di_nextents > ip->i_df.if_ext_max); @@ -3235,7 +3233,7 @@ xfs_iflush_int( #endif ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); - ASSERT(!completion_done(&ip->i_flush)); + ASSERT(issemalocked(&(ip->i_flock))); ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || ip->i_d.di_nextents > ip->i_df.if_ext_max); @@ -3467,6 +3465,7 @@ xfs_iflush_all( xfs_mount_t *mp) { xfs_inode_t *ip; + bhv_vnode_t *vp; again: XFS_MOUNT_ILOCK(mp); @@ -3481,13 +3480,14 @@ xfs_iflush_all( continue; } - if (!VFS_I(ip)) { + vp = XFS_ITOV_NULL(ip); + if (!vp) { XFS_MOUNT_IUNLOCK(mp); xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); goto again; } - ASSERT(vn_count(VFS_I(ip)) == 0); + ASSERT(vn_count(vp) == 0); ip = ip->i_mnext; } while (ip != mp->m_inodes); @@ -3707,7 +3707,7 @@ xfs_iext_add_indirect_multi( * (all extents past */ if (nex2) { byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); - nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS); + nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); erp->er_extcount -= nex2; xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); @@ -4007,7 +4007,8 @@ xfs_iext_realloc_direct( ifp->if_u1.if_extents = kmem_realloc(ifp->if_u1.if_extents, rnew_size, - ifp->if_real_bytes, KM_NOFS); + ifp->if_real_bytes, + KM_SLEEP); } if (rnew_size > ifp->if_real_bytes) { memset(&ifp->if_u1.if_extents[ifp->if_bytes / @@ -4066,7 +4067,7 @@ xfs_iext_inline_to_direct( xfs_ifork_t *ifp, /* inode fork pointer */ int new_size) /* number of extents in file */ { - ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS); + ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP); memset(ifp->if_u1.if_extents, 0, new_size); if (ifp->if_bytes) { memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, @@ -4098,7 +4099,7 @@ xfs_iext_realloc_indirect( } else { ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) kmem_realloc(ifp->if_u1.if_ext_irec, - new_size, size, KM_NOFS); + new_size, size, KM_SLEEP); } } @@ -4340,10 +4341,11 @@ xfs_iext_irec_init( nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); ASSERT(nextents <= XFS_LINEAR_EXTS); - erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS); + erp = (xfs_ext_irec_t *) + kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP); if (nextents == 0) { - ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); + ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); } else if (!ifp->if_real_bytes) { xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { @@ -4391,7 +4393,7 @@ xfs_iext_irec_new( /* Initialize new extent record */ erp = ifp->if_u1.if_ext_irec; - erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); + erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); erp[erp_idx].er_extcount = 0; diff --git a/trunk/fs/xfs/xfs_inode.h b/trunk/fs/xfs/xfs_inode.h index 1420c49674d7..17a04b6321ed 100644 --- a/trunk/fs/xfs/xfs_inode.h +++ b/trunk/fs/xfs/xfs_inode.h @@ -87,7 +87,8 @@ typedef struct xfs_ifork { * Flags for xfs_ichgtime(). 
*/ #define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ -#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */ +#define XFS_ICHGTIME_ACC 0x2 /* data fork access timestamp */ +#define XFS_ICHGTIME_CHG 0x4 /* inode field change timestamp */ /* * Per-fork incore inode flags. @@ -203,7 +204,7 @@ typedef struct xfs_inode { struct xfs_inode *i_mprev; /* ptr to prev inode */ struct xfs_mount *i_mount; /* fs mount struct ptr */ struct list_head i_reclaim; /* reclaim list */ - struct inode *i_vnode; /* vnode backpointer */ + bhv_vnode_t *i_vnode; /* vnode backpointer */ struct xfs_dquot *i_udquot; /* user dquot */ struct xfs_dquot *i_gdquot; /* group dquot */ @@ -222,7 +223,7 @@ typedef struct xfs_inode { struct xfs_inode_log_item *i_itemp; /* logging information */ mrlock_t i_lock; /* inode lock */ mrlock_t i_iolock; /* inode IO lock */ - struct completion i_flush; /* inode flush completion q */ + sema_t i_flock; /* inode flush lock */ atomic_t i_pincount; /* inode pin count */ wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ spinlock_t i_flags_lock; /* inode i_flags lock */ @@ -262,18 +263,6 @@ typedef struct xfs_inode { #define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ (ip)->i_size : (ip)->i_d.di_size; -/* Convert from vfs inode to xfs inode */ -static inline struct xfs_inode *XFS_I(struct inode *inode) -{ - return (struct xfs_inode *)inode->i_private; -} - -/* convert from xfs inode to vfs inode */ -static inline struct inode *VFS_I(struct xfs_inode *ip) -{ - return (struct inode *)ip->i_vnode; -} - /* * i_flags helper functions */ @@ -450,6 +439,9 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) #define XFS_ITRUNC_DEFINITE 0x1 #define XFS_ITRUNC_MAYBE 0x2 +#define XFS_ITOV(ip) ((ip)->i_vnode) +#define XFS_ITOV_NULL(ip) ((ip)->i_vnode) + /* * For multiple groups support: if S_ISGID bit is set in the parent * directory, group of new file is set to that of the parent, and @@ -481,8 +473,11 @@ int xfs_ilock_nowait(xfs_inode_t *, uint); void xfs_iunlock(xfs_inode_t *, uint); void xfs_ilock_demote(xfs_inode_t *, uint); int xfs_isilocked(xfs_inode_t *, uint); +void xfs_iflock(xfs_inode_t *); +int xfs_iflock_nowait(xfs_inode_t *); uint xfs_ilock_map_shared(xfs_inode_t *); void xfs_iunlock_map_shared(xfs_inode_t *, uint); +void xfs_ifunlock(xfs_inode_t *); void xfs_ireclaim(xfs_inode_t *); int xfs_finish_reclaim(xfs_inode_t *, int, int); int xfs_finish_reclaim_all(struct xfs_mount *, int); @@ -527,7 +522,6 @@ void xfs_iflush_all(struct xfs_mount *); void xfs_ichgtime(xfs_inode_t *, int); xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); void xfs_lock_inodes(xfs_inode_t **, int, uint); -void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); void xfs_synchronize_atime(xfs_inode_t *); void xfs_mark_inode_dirty_sync(xfs_inode_t *); @@ -576,26 +570,6 @@ extern struct kmem_zone *xfs_ifork_zone; extern struct kmem_zone *xfs_inode_zone; extern struct kmem_zone *xfs_ili_zone; -/* - * Manage the i_flush queue embedded in the inode. This completion - * queue synchronizes processes attempting to flush the in-core - * inode back to disk. 
- */ -static inline void xfs_iflock(xfs_inode_t *ip) -{ - wait_for_completion(&ip->i_flush); -} - -static inline int xfs_iflock_nowait(xfs_inode_t *ip) -{ - return try_wait_for_completion(&ip->i_flush); -} - -static inline void xfs_ifunlock(xfs_inode_t *ip) -{ - complete(&ip->i_flush); -} - #endif /* __KERNEL__ */ #endif /* __XFS_INODE_H__ */ diff --git a/trunk/fs/xfs/xfs_inode_item.c b/trunk/fs/xfs/xfs_inode_item.c index 97c7452e2620..0eee08a32c26 100644 --- a/trunk/fs/xfs/xfs_inode_item.c +++ b/trunk/fs/xfs/xfs_inode_item.c @@ -779,10 +779,11 @@ xfs_inode_item_pushbuf( ASSERT(iip->ili_push_owner == current_pid()); /* - * If a flush is not in progress anymore, chances are that the - * inode was taken off the AIL. So, just get out. + * If flushlock isn't locked anymore, chances are that the + * inode flush completed and the inode was taken off the AIL. + * So, just get out. */ - if (completion_done(&ip->i_flush) || + if (!issemalocked(&(ip->i_flock)) || ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { iip->ili_pushbuf_flag = 0; xfs_iunlock(ip, XFS_ILOCK_SHARED); @@ -804,7 +805,7 @@ xfs_inode_item_pushbuf( * If not, we can flush it async. */ dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && - !completion_done(&ip->i_flush)); + issemalocked(&(ip->i_flock))); iip->ili_pushbuf_flag = 0; xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_buftrace("INODE ITEM PUSH", bp); @@ -857,7 +858,7 @@ xfs_inode_item_push( ip = iip->ili_inode; ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); - ASSERT(!completion_done(&ip->i_flush)); + ASSERT(issemalocked(&(ip->i_flock))); /* * Since we were able to lock the inode's flush lock and * we found it on the AIL, the inode must be dirty. This diff --git a/trunk/fs/xfs/xfs_itable.c b/trunk/fs/xfs/xfs_itable.c index cf6754a3c5b3..9a3ef9dcaeb9 100644 --- a/trunk/fs/xfs/xfs_itable.c +++ b/trunk/fs/xfs/xfs_itable.c @@ -59,6 +59,7 @@ xfs_bulkstat_one_iget( { xfs_icdinode_t *dic; /* dinode core info pointer */ xfs_inode_t *ip; /* incore inode pointer */ + bhv_vnode_t *vp; int error; error = xfs_iget(mp, NULL, ino, @@ -71,6 +72,7 @@ xfs_bulkstat_one_iget( ASSERT(ip != NULL); ASSERT(ip->i_blkno != (xfs_daddr_t)0); + vp = XFS_ITOV(ip); dic = &ip->i_d; /* xfs_iget returns the following without needing @@ -83,7 +85,7 @@ xfs_bulkstat_one_iget( buf->bs_uid = dic->di_uid; buf->bs_gid = dic->di_gid; buf->bs_size = dic->di_size; - vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime); + vn_atime_to_bstime(vp, &buf->bs_atime); buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; diff --git a/trunk/fs/xfs/xfs_log.c b/trunk/fs/xfs/xfs_log.c index ccba14eb9dbe..91b00a5686cd 100644 --- a/trunk/fs/xfs/xfs_log.c +++ b/trunk/fs/xfs/xfs_log.c @@ -160,7 +160,7 @@ void xlog_trace_iclog(xlog_in_core_t *iclog, uint state) { if (!iclog->ic_trace) - iclog->ic_trace = ktrace_alloc(256, KM_NOFS); + iclog->ic_trace = ktrace_alloc(256, KM_SLEEP); ktrace_enter(iclog->ic_trace, (void *)((unsigned long)state), (void *)((unsigned long)current_pid()), @@ -336,13 +336,16 @@ xfs_log_done(xfs_mount_t *mp, } else { xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); xlog_regrant_reserve_log_space(log, ticket); - /* If this ticket was a permanent reservation and we aren't - * trying to release it, reset the inited flags; so next time - * we write, a start record will be written out. 
- */ - ticket->t_flags |= XLOG_TIC_INITED; } + /* If this ticket was a permanent reservation and we aren't + * trying to release it, reset the inited flags; so next time + * we write, a start record will be written out. + */ + if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) && + (flags & XFS_LOG_REL_PERM_RESERV) == 0) + ticket->t_flags |= XLOG_TIC_INITED; + return lsn; } /* xfs_log_done */ @@ -354,11 +357,11 @@ xfs_log_done(xfs_mount_t *mp, * Asynchronous forces are implemented by setting the WANT_SYNC * bit in the appropriate in-core log and then returning. * - * Synchronous forces are implemented with a signal variable. All callers - * to force a given lsn to disk will wait on a the sv attached to the + * Synchronous forces are implemented with a semaphore. All callers + * to force a given lsn to disk will wait on a semaphore attached to the * specific in-core log. When given in-core log finally completes its * write to disk, that thread will wake up all threads waiting on the - * sv. + * semaphore. */ int _xfs_log_force( @@ -585,12 +588,12 @@ xfs_log_mount( * mp - ubiquitous xfs mount point structure */ int -xfs_log_mount_finish(xfs_mount_t *mp) +xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags) { int error; if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) - error = xlog_recover_finish(mp->m_log); + error = xlog_recover_finish(mp->m_log, mfsi_flags); else { error = 0; ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); @@ -704,7 +707,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) if (!(iclog->ic_state == XLOG_STATE_ACTIVE || iclog->ic_state == XLOG_STATE_DIRTY)) { if (!XLOG_FORCED_SHUTDOWN(log)) { - sv_wait(&iclog->ic_force_wait, PMEM, + sv_wait(&iclog->ic_forcesema, PMEM, &log->l_icloglock, s); } else { spin_unlock(&log->l_icloglock); @@ -745,7 +748,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) || iclog->ic_state == XLOG_STATE_DIRTY || iclog->ic_state == XLOG_STATE_IOERROR) ) { - sv_wait(&iclog->ic_force_wait, PMEM, + sv_wait(&iclog->ic_forcesema, PMEM, &log->l_icloglock, s); } else { spin_unlock(&log->l_icloglock); @@ -835,7 +838,7 @@ xfs_log_move_tail(xfs_mount_t *mp, break; tail_lsn = 0; free_bytes -= tic->t_unit_res; - sv_signal(&tic->t_wait); + sv_signal(&tic->t_sema); tic = tic->t_next; } while (tic != log->l_write_headq); } @@ -856,7 +859,7 @@ xfs_log_move_tail(xfs_mount_t *mp, break; tail_lsn = 0; free_bytes -= need_bytes; - sv_signal(&tic->t_wait); + sv_signal(&tic->t_sema); tic = tic->t_next; } while (tic != log->l_reserve_headq); } @@ -1282,8 +1285,8 @@ xlog_alloc_log(xfs_mount_t *mp, ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); - sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force"); - sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write"); + sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force"); + sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write"); iclogp = &iclog->ic_next; } @@ -1562,8 +1565,8 @@ xlog_dealloc_log(xlog_t *log) iclog = log->l_iclog; for (i=0; i<log->l_iclog_bufs; i++) { - sv_destroy(&iclog->ic_force_wait); - sv_destroy(&iclog->ic_write_wait); + sv_destroy(&iclog->ic_forcesema); + sv_destroy(&iclog->ic_writesema); xfs_buf_free(iclog->ic_bp); #ifdef XFS_LOG_TRACE if (iclog->ic_trace != NULL) { @@ -1973,7 +1976,7 @@ xlog_write(xfs_mount_t * mp, /* Clean iclogs starting from the head. This ordering must be * maintained, so an iclog doesn't become ACTIVE beyond one that * is SYNCING.
This is also required to maintain the notion that we use - * a ordered wait queue to hold off would be writers to the log when every + * a counting semaphore to hold off would be writers to the log when every * iclog is trying to sync to disk. * * State Change: DIRTY -> ACTIVE @@ -2237,7 +2240,7 @@ xlog_state_do_callback( xlog_state_clean_log(log); /* wake up threads waiting in xfs_log_force() */ - sv_broadcast(&iclog->ic_force_wait); + sv_broadcast(&iclog->ic_forcesema); iclog = iclog->ic_next; } while (first_iclog != iclog); @@ -2299,7 +2302,8 @@ xlog_state_do_callback( * the second completion goes through. * * Callbacks could take time, so they are done outside the scope of the - * global state machine log lock. + * global state machine log lock. Assume that the calls to cvsema won't + * take a long time. At least we know it won't sleep. */ STATIC void xlog_state_done_syncing( @@ -2335,7 +2339,7 @@ xlog_state_done_syncing( * iclog buffer, we wake them all, one will get to do the * I/O, the others get to wait for the result. */ - sv_broadcast(&iclog->ic_write_wait); + sv_broadcast(&iclog->ic_writesema); spin_unlock(&log->l_icloglock); xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ } /* xlog_state_done_syncing */ @@ -2343,9 +2347,11 @@ xlog_state_done_syncing( /* * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must - * sleep. We wait on the flush queue on the head iclog as that should be - * the first iclog to complete flushing. Hence if all iclogs are syncing, - * we will wait here and all new writes will sleep until a sync completes. + * sleep. The flush semaphore is set to the number of in-core buffers and + * decremented around disk syncing. Therefore, if all buffers are syncing, + * this semaphore will cause new writes to sleep until a sync completes. + * Otherwise, this code just does p() followed by v(). This approximates + * a sleep/wakeup except we can't race. * * The in-core logs are used in a circular fashion. They are not used * out-of-order even when an iclog past the head is free. @@ -2502,7 +2508,7 @@ xlog_grant_log_space(xlog_t *log, goto error_return; XFS_STATS_INC(xs_sleep_logspace); - sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); /* * If we got an error, and the filesystem is shutting down, * we'll catch it down below. So just continue... 
@@ -2528,7 +2534,7 @@ xlog_grant_log_space(xlog_t *log, xlog_trace_loggrant(log, tic, "xlog_grant_log_space: sleep 2"); XFS_STATS_INC(xs_sleep_logspace); - sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); if (XLOG_FORCED_SHUTDOWN(log)) { spin_lock(&log->l_grant_lock); @@ -2627,7 +2633,7 @@ xlog_regrant_write_log_space(xlog_t *log, if (free_bytes < ntic->t_unit_res) break; free_bytes -= ntic->t_unit_res; - sv_signal(&ntic->t_wait); + sv_signal(&ntic->t_sema); ntic = ntic->t_next; } while (ntic != log->l_write_headq); @@ -2638,7 +2644,7 @@ xlog_regrant_write_log_space(xlog_t *log, xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: sleep 1"); XFS_STATS_INC(xs_sleep_logspace); - sv_wait(&tic->t_wait, PINOD|PLTWAIT, + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); /* If we're shutting down, this tic is already @@ -2667,7 +2673,7 @@ xlog_regrant_write_log_space(xlog_t *log, if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) xlog_ins_ticketq(&log->l_write_headq, tic); XFS_STATS_INC(xs_sleep_logspace); - sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); /* If we're shutting down, this tic is already off the queue */ if (XLOG_FORCED_SHUTDOWN(log)) { @@ -2910,7 +2916,7 @@ xlog_state_switch_iclogs(xlog_t *log, * 2. the current iclog is drity, and the previous iclog is in the * active or dirty state. * - * We may sleep if: + * We may sleep (call psema) if: * * 1. the current iclog is not in the active nor dirty state. * 2. the current iclog dirty, and the previous iclog is not in the @@ -3007,7 +3013,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) return XFS_ERROR(EIO); } XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s); + sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s); /* * No need to grab the log lock here since we're * only deciding whether or not to return EIO @@ -3090,7 +3096,7 @@ xlog_state_sync(xlog_t *log, XLOG_STATE_SYNCING))) { ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_prev->ic_write_wait, PSWP, + sv_wait(&iclog->ic_prev->ic_writesema, PSWP, &log->l_icloglock, s); *log_flushed = 1; already_slept = 1; @@ -3110,7 +3116,7 @@ xlog_state_sync(xlog_t *log, !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { /* - * Don't wait on completion if we know that we've + * Don't wait on the forcesema if we know that we've * gotten a log write error. 
*/ if (iclog->ic_state & XLOG_STATE_IOERROR) { @@ -3118,7 +3124,7 @@ xlog_state_sync(xlog_t *log, return XFS_ERROR(EIO); } XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s); + sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s); /* * No need to grab the log lock here since we're * only deciding whether or not to return EIO @@ -3174,7 +3180,7 @@ STATIC void xlog_ticket_put(xlog_t *log, xlog_ticket_t *ticket) { - sv_destroy(&ticket->t_wait); + sv_destroy(&ticket->t_sema); kmem_zone_free(xfs_log_ticket_zone, ticket); } /* xlog_ticket_put */ @@ -3264,7 +3270,7 @@ xlog_ticket_get(xlog_t *log, tic->t_trans_type = 0; if (xflags & XFS_LOG_PERM_RESERV) tic->t_flags |= XLOG_TIC_PERM_RESERV; - sv_init(&(tic->t_wait), SV_DEFAULT, "logtick"); + sv_init(&(tic->t_sema), SV_DEFAULT, "logtick"); xlog_tic_reset_res(tic); @@ -3551,14 +3557,14 @@ xfs_log_force_umount( */ if ((tic = log->l_reserve_headq)) { do { - sv_signal(&tic->t_wait); + sv_signal(&tic->t_sema); tic = tic->t_next; } while (tic != log->l_reserve_headq); } if ((tic = log->l_write_headq)) { do { - sv_signal(&tic->t_wait); + sv_signal(&tic->t_sema); tic = tic->t_next; } while (tic != log->l_write_headq); } diff --git a/trunk/fs/xfs/xfs_log.h b/trunk/fs/xfs/xfs_log.h index d47b91f10822..d1d678ecb63e 100644 --- a/trunk/fs/xfs/xfs_log.h +++ b/trunk/fs/xfs/xfs_log.h @@ -149,7 +149,7 @@ int xfs_log_mount(struct xfs_mount *mp, struct xfs_buftarg *log_target, xfs_daddr_t start_block, int num_bblocks); -int xfs_log_mount_finish(struct xfs_mount *mp); +int xfs_log_mount_finish(struct xfs_mount *mp, int); void xfs_log_move_tail(struct xfs_mount *mp, xfs_lsn_t tail_lsn); int xfs_log_notify(struct xfs_mount *mp, diff --git a/trunk/fs/xfs/xfs_log_priv.h b/trunk/fs/xfs/xfs_log_priv.h index c8a5b22ee3e3..6245913196b4 100644 --- a/trunk/fs/xfs/xfs_log_priv.h +++ b/trunk/fs/xfs/xfs_log_priv.h @@ -241,7 +241,7 @@ typedef struct xlog_res { } xlog_res_t; typedef struct xlog_ticket { - sv_t t_wait; /* ticket wait queue : 20 */ + sv_t t_sema; /* sleep on this semaphore : 20 */ struct xlog_ticket *t_next; /* :4|8 */ struct xlog_ticket *t_prev; /* :4|8 */ xlog_tid_t t_tid; /* transaction identifier : 4 */ @@ -314,7 +314,7 @@ typedef struct xlog_rec_ext_header { * xlog_rec_header_t into the reserved space. * - ic_data follows, so a write to disk can start at the beginning of * the iclog. - * - ic_forcewait is used to implement synchronous forcing of the iclog to disk. + * - ic_forcesema is used to implement synchronous forcing of the iclog to disk. * - ic_next is the pointer to the next iclog in the ring. * - ic_bp is a pointer to the buffer used to write this incore log to disk. * - ic_log is a pointer back to the global log structure. @@ -339,8 +339,8 @@ typedef struct xlog_rec_ext_header { * and move everything else out to subsequent cachelines. */ typedef struct xlog_iclog_fields { - sv_t ic_force_wait; - sv_t ic_write_wait; + sv_t ic_forcesema; + sv_t ic_writesema; struct xlog_in_core *ic_next; struct xlog_in_core *ic_prev; struct xfs_buf *ic_bp; @@ -377,8 +377,8 @@ typedef struct xlog_in_core { /* * Defines to save our code from this glop. 
*/ -#define ic_force_wait hic_fields.ic_force_wait -#define ic_write_wait hic_fields.ic_write_wait +#define ic_forcesema hic_fields.ic_forcesema +#define ic_writesema hic_fields.ic_writesema #define ic_next hic_fields.ic_next #define ic_prev hic_fields.ic_prev #define ic_bp hic_fields.ic_bp @@ -468,7 +468,7 @@ extern int xlog_find_tail(xlog_t *log, xfs_daddr_t *head_blk, xfs_daddr_t *tail_blk); extern int xlog_recover(xlog_t *log); -extern int xlog_recover_finish(xlog_t *log); +extern int xlog_recover_finish(xlog_t *log, int mfsi_flags); extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); extern void xlog_recover_process_iunlinks(xlog_t *log); diff --git a/trunk/fs/xfs/xfs_log_recover.c b/trunk/fs/xfs/xfs_log_recover.c index 82d46ce69d5f..9eb722ec744e 100644 --- a/trunk/fs/xfs/xfs_log_recover.c +++ b/trunk/fs/xfs/xfs_log_recover.c @@ -3940,7 +3940,8 @@ xlog_recover( */ int xlog_recover_finish( - xlog_t *log) + xlog_t *log, + int mfsi_flags) { /* * Now we're ready to do the transactions needed for the @@ -3968,7 +3969,9 @@ xlog_recover_finish( xfs_log_force(log->l_mp, (xfs_lsn_t)0, (XFS_LOG_FORCE | XFS_LOG_SYNC)); - xlog_recover_process_iunlinks(log); + if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) { + xlog_recover_process_iunlinks(log); + } xlog_recover_check_summary(log); diff --git a/trunk/fs/xfs/xfs_mount.c b/trunk/fs/xfs/xfs_mount.c index a4503f5e9497..6c5d1325e7f6 100644 --- a/trunk/fs/xfs/xfs_mount.c +++ b/trunk/fs/xfs/xfs_mount.c @@ -128,7 +128,7 @@ static const struct { * initialized. */ STATIC void -xfs_free_perag( +xfs_mount_free( xfs_mount_t *mp) { if (mp->m_perag) { @@ -139,6 +139,20 @@ xfs_free_perag( kmem_free(mp->m_perag[agno].pagb_list); kmem_free(mp->m_perag); } + + spinlock_destroy(&mp->m_ail_lock); + spinlock_destroy(&mp->m_sb_lock); + mutex_destroy(&mp->m_ilock); + mutex_destroy(&mp->m_growlock); + if (mp->m_quotainfo) + XFS_QM_DONE(mp); + + if (mp->m_fsname != NULL) + kmem_free(mp->m_fsname); + if (mp->m_rtname != NULL) + kmem_free(mp->m_rtname); + if (mp->m_logname != NULL) + kmem_free(mp->m_logname); } /* @@ -690,11 +704,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) * Update alignment values based on mount options and sb values */ STATIC int -xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags) +xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags) { xfs_sb_t *sbp = &(mp->m_sb); - if (mp->m_dalign) { + if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { /* * If stripe unit and stripe width are not multiples * of the fs blocksize turn off alignment. @@ -850,7 +864,7 @@ xfs_set_inoalignment(xfs_mount_t *mp) * Check that the data (and log if separate) are an ok size. 
*/ STATIC int -xfs_check_sizes(xfs_mount_t *mp) +xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags) { xfs_buf_t *bp; xfs_daddr_t d; @@ -873,7 +887,8 @@ xfs_check_sizes(xfs_mount_t *mp) return error; } - if (mp->m_logdev_targp != mp->m_ddev_targp) { + if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) && + mp->m_logdev_targp != mp->m_ddev_targp) { d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { cmn_err(CE_WARN, "XFS: size check 3 failed"); @@ -908,13 +923,15 @@ xfs_check_sizes(xfs_mount_t *mp) */ int xfs_mountfs( - xfs_mount_t *mp) + xfs_mount_t *mp, + int mfsi_flags) { xfs_sb_t *sbp = &(mp->m_sb); xfs_inode_t *rip; __uint64_t resblks; __int64_t update_flags = 0LL; uint quotamount, quotaflags; + int agno; int uuid_mounted = 0; int error = 0; @@ -968,7 +985,7 @@ xfs_mountfs( * allocator alignment is within an ag, therefore ag has * to be aligned at stripe boundary. */ - error = xfs_update_alignment(mp, &update_flags); + error = xfs_update_alignment(mp, mfsi_flags, &update_flags); if (error) goto error1; @@ -987,7 +1004,8 @@ xfs_mountfs( * since a single partition filesystem is identical to a single * partition volume/filesystem. */ - if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) { + if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && + (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { if (xfs_uuid_mount(mp)) { error = XFS_ERROR(EINVAL); goto error1; @@ -1015,7 +1033,7 @@ xfs_mountfs( /* * Check that the data (and log if separate) are an ok size. */ - error = xfs_check_sizes(mp); + error = xfs_check_sizes(mp, mfsi_flags); if (error) goto error1; @@ -1028,6 +1046,13 @@ xfs_mountfs( goto error1; } + /* + * For client case we are done now + */ + if (mfsi_flags & XFS_MFSI_CLIENT) { + return 0; + } + /* * Copies the low order bits of the timestamp and the randomly * set "sequence" number out of a UUID. @@ -1052,10 +1077,8 @@ xfs_mountfs( * Allocate and initialize the per-ag data. */ init_rwsem(&mp->m_peraglock); - mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), - KM_MAYFAIL); - if (!mp->m_perag) - goto error1; + mp->m_perag = + kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP); mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount); @@ -1167,7 +1190,7 @@ xfs_mountfs( * delayed until after the root and real-time bitmap inodes * were consistently read in. */ - error = xfs_log_mount_finish(mp); + error = xfs_log_mount_finish(mp, mfsi_flags); if (error) { cmn_err(CE_WARN, "XFS: log mount finish failed"); goto error4; @@ -1176,7 +1199,7 @@ xfs_mountfs( /* * Complete the quota initialisation, post-log-replay component. */ - error = XFS_QM_MOUNT(mp, quotamount, quotaflags); + error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags); if (error) goto error4; @@ -1210,7 +1233,12 @@ xfs_mountfs( error3: xfs_log_unmount_dealloc(mp); error2: - xfs_free_perag(mp); + for (agno = 0; agno < sbp->sb_agcount; agno++) + if (mp->m_perag[agno].pagb_list) + kmem_free(mp->m_perag[agno].pagb_list); + kmem_free(mp->m_perag); + mp->m_perag = NULL; + /* FALLTHROUGH */ error1: if (uuid_mounted) uuid_table_remove(&mp->m_sb.sb_uuid); @@ -1218,17 +1246,16 @@ xfs_mountfs( } /* + * xfs_unmountfs + * * This flushes out the inodes,dquots and the superblock, unmounts the * log and makes sure that incore structures are freed. 
*/ -void -xfs_unmountfs( - struct xfs_mount *mp) +int +xfs_unmountfs(xfs_mount_t *mp) { - __uint64_t resblks; - int error; - - IRELE(mp->m_rootip); + __uint64_t resblks; + int error = 0; /* * We can potentially deadlock here if we have an inode cluster @@ -1285,6 +1312,8 @@ xfs_unmountfs( xfs_unmountfs_wait(mp); /* wait for async bufs */ xfs_log_unmount(mp); /* Done! No more fs ops. */ + xfs_freesb(mp); + /* * All inodes from this mount point should be freed. */ @@ -1293,12 +1322,11 @@ xfs_unmountfs( if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) uuid_table_remove(&mp->m_sb.sb_uuid); -#if defined(DEBUG) +#if defined(DEBUG) || defined(INDUCE_IO_ERROR) xfs_errortag_clearall(mp, 0); #endif - xfs_free_perag(mp); - if (mp->m_quotainfo) - XFS_QM_DONE(mp); + xfs_mount_free(mp); + return 0; } STATIC void diff --git a/trunk/fs/xfs/xfs_mount.h b/trunk/fs/xfs/xfs_mount.h index f3c1024b1241..5269bd6e3df0 100644 --- a/trunk/fs/xfs/xfs_mount.h +++ b/trunk/fs/xfs/xfs_mount.h @@ -114,7 +114,7 @@ struct xfs_dqtrxops; struct xfs_quotainfo; typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); -typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint); +typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint, int); typedef int (*xfs_qmunmount_t)(struct xfs_mount *); typedef void (*xfs_qmdone_t)(struct xfs_mount *); typedef void (*xfs_dqrele_t)(struct xfs_dquot *); @@ -158,8 +158,8 @@ typedef struct xfs_qmops { #define XFS_QM_INIT(mp, mnt, fl) \ (*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl) -#define XFS_QM_MOUNT(mp, mnt, fl) \ - (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl) +#define XFS_QM_MOUNT(mp, mnt, fl, mfsi_flags) \ + (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl, mfsi_flags) #define XFS_QM_UNMOUNT(mp) \ (*(mp)->m_qm_ops->xfs_qmunmount)(mp) #define XFS_QM_DONE(mp) \ @@ -442,6 +442,13 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, /* * Flags for xfs_mountfs */ +#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */ +#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */ +/* XFS_MFSI_RRINODES */ +#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */ + /* log recovery */ +#define XFS_MFSI_NO_QUOTACHECK 0x10 /* Skip quotacheck processing */ +/* XFS_MFSI_CONVERT_SUNIT */ #define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ #define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) @@ -510,10 +517,10 @@ typedef struct xfs_mod_sb { extern void xfs_mod_sb(xfs_trans_t *, __int64_t); extern int xfs_log_sbcount(xfs_mount_t *, uint); -extern int xfs_mountfs(xfs_mount_t *mp); +extern int xfs_mountfs(xfs_mount_t *mp, int); extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); -extern void xfs_unmountfs(xfs_mount_t *); +extern int xfs_unmountfs(xfs_mount_t *); extern int xfs_unmountfs_writesb(xfs_mount_t *); extern int xfs_unmount_flush(xfs_mount_t *, int); extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); diff --git a/trunk/fs/xfs/xfs_rtalloc.c b/trunk/fs/xfs/xfs_rtalloc.c index e2f68de16159..bf87a5913504 100644 --- a/trunk/fs/xfs/xfs_rtalloc.c +++ b/trunk/fs/xfs/xfs_rtalloc.c @@ -73,6 +73,18 @@ STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int, * Internal functions. */ +/* + * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set. + */ +STATIC int +xfs_lowbit32( + __uint32_t v) +{ + if (v) + return ffs(v) - 1; + return -1; +} + /* * Allocate space to the bitmap or summary file, and zero it, for growfs. 
*/ @@ -438,7 +450,6 @@ xfs_rtallocate_extent_near( } bbno = XFS_BITTOBLOCK(mp, bno); i = 0; - ASSERT(minlen != 0); log2len = xfs_highbit32(minlen); /* * Loop over all bitmap blocks (bbno + i is current block). @@ -607,8 +618,6 @@ xfs_rtallocate_extent_size( xfs_suminfo_t sum; /* summary information for extents */ ASSERT(minlen % prod == 0 && maxlen % prod == 0); - ASSERT(maxlen != 0); - /* * Loop over all the levels starting with maxlen. * At each level, look at all the bitmap blocks, to see if there @@ -666,9 +675,6 @@ xfs_rtallocate_extent_size( *rtblock = NULLRTBLOCK; return 0; } - ASSERT(minlen != 0); - ASSERT(maxlen != 0); - /* * Loop over sizes, from maxlen down to minlen. * This time, when we do the allocations, allow smaller ones @@ -1955,7 +1961,6 @@ xfs_growfs_rt( nsbp->sb_blocksize * nsbp->sb_rextsize); nsbp->sb_rextents = nsbp->sb_rblocks; do_div(nsbp->sb_rextents, nsbp->sb_rextsize); - ASSERT(nsbp->sb_rextents != 0); nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; nrsumsize = diff --git a/trunk/fs/xfs/xfs_rw.c b/trunk/fs/xfs/xfs_rw.c index 3a82576dde9a..b0f31c09a76d 100644 --- a/trunk/fs/xfs/xfs_rw.c +++ b/trunk/fs/xfs/xfs_rw.c @@ -314,7 +314,7 @@ xfs_bioerror_relse( * ASYNC buffers. */ XFS_BUF_ERROR(bp, EIO); - XFS_BUF_FINISH_IOWAIT(bp); + XFS_BUF_V_IODONESEMA(bp); } else { xfs_buf_relse(bp); } diff --git a/trunk/fs/xfs/xfs_trans.c b/trunk/fs/xfs/xfs_trans.c index 4e1c22a23be5..e4ebddd3c500 100644 --- a/trunk/fs/xfs/xfs_trans.c +++ b/trunk/fs/xfs/xfs_trans.c @@ -43,7 +43,6 @@ #include "xfs_quota.h" #include "xfs_trans_priv.h" #include "xfs_trans_space.h" -#include "xfs_inode_item.h" STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *); @@ -254,7 +253,7 @@ _xfs_trans_alloc( tp->t_mountp = mp; tp->t_items_free = XFS_LIC_NUM_SLOTS; tp->t_busy_free = XFS_LBC_NUM_SLOTS; - xfs_lic_init(&(tp->t_items)); + XFS_LIC_INIT(&(tp->t_items)); XFS_LBC_INIT(&(tp->t_busy)); return tp; } @@ -283,7 +282,7 @@ xfs_trans_dup( ntp->t_mountp = tp->t_mountp; ntp->t_items_free = XFS_LIC_NUM_SLOTS; ntp->t_busy_free = XFS_LBC_NUM_SLOTS; - xfs_lic_init(&(ntp->t_items)); + XFS_LIC_INIT(&(ntp->t_items)); XFS_LBC_INIT(&(ntp->t_busy)); ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); @@ -1170,7 +1169,7 @@ xfs_trans_cancel( while (licp != NULL) { lidp = licp->lic_descs; for (i = 0; i < licp->lic_unused; i++, lidp++) { - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } @@ -1217,68 +1216,6 @@ xfs_trans_free( kmem_zone_free(xfs_trans_zone, tp); } -/* - * Roll from one trans in the sequence of PERMANENT transactions to - * the next: permanent transactions are only flushed out when - * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon - * as possible to let chunks of it go to the log. So we commit the - * chunk we've been working on and get a new transaction to continue. - */ -int -xfs_trans_roll( - struct xfs_trans **tpp, - struct xfs_inode *dp) -{ - struct xfs_trans *trans; - unsigned int logres, count; - int error; - - /* - * Ensure that the inode is always logged. - */ - trans = *tpp; - xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE); - - /* - * Copy the critical parameters from one trans to the next. - */ - logres = trans->t_log_res; - count = trans->t_log_count; - *tpp = xfs_trans_dup(trans); - - /* - * Commit the current transaction. - * If this commit failed, then it'd just unlock those items that - * are not marked ihold. That also means that a filesystem shutdown - * is in progress. 
The caller takes the responsibility to cancel - * the duplicate transaction that gets returned. - */ - error = xfs_trans_commit(trans, 0); - if (error) - return (error); - - trans = *tpp; - - /* - * Reserve space in the log for th next transaction. - * This also pushes items in the "AIL", the list of logged items, - * out to disk if they are taking up space at the tail of the log - * that we want to use. This requires that either nothing be locked - * across this call, or that anything that is locked be logged in - * the prior and the next transactions. - */ - error = xfs_trans_reserve(trans, 0, logres, 0, - XFS_TRANS_PERM_LOG_RES, count); - /* - * Ensure that the inode is in the new transaction and locked. - */ - if (error) - return error; - - xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); - xfs_trans_ihold(trans, dp); - return 0; -} /* * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item(). @@ -1316,7 +1253,7 @@ xfs_trans_committed( * Special case the chunk embedded in the transaction. */ licp = &(tp->t_items); - if (!(xfs_lic_are_all_free(licp))) { + if (!(XFS_LIC_ARE_ALL_FREE(licp))) { xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); } @@ -1325,7 +1262,7 @@ xfs_trans_committed( */ licp = licp->lic_next; while (licp != NULL) { - ASSERT(!xfs_lic_are_all_free(licp)); + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); next_licp = licp->lic_next; kmem_free(licp); @@ -1388,7 +1325,7 @@ xfs_trans_chunk_committed( lidp = licp->lic_descs; for (i = 0; i < licp->lic_unused; i++, lidp++) { - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } diff --git a/trunk/fs/xfs/xfs_trans.h b/trunk/fs/xfs/xfs_trans.h index 74c80bd2b0ec..0804207c7391 100644 --- a/trunk/fs/xfs/xfs_trans.h +++ b/trunk/fs/xfs/xfs_trans.h @@ -210,52 +210,62 @@ typedef struct xfs_log_item_chunk { * lic_unused to the right value (0 matches all free). The * lic_descs.lid_index values are set up as each desc is allocated. 
*/ +#define XFS_LIC_INIT(cp) xfs_lic_init(cp) static inline void xfs_lic_init(xfs_log_item_chunk_t *cp) { cp->lic_free = XFS_LIC_FREEMASK; } +#define XFS_LIC_INIT_SLOT(cp,slot) xfs_lic_init_slot(cp, slot) static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot) { cp->lic_descs[slot].lid_index = (unsigned char)(slot); } +#define XFS_LIC_VACANCY(cp) xfs_lic_vacancy(cp) static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp) { return cp->lic_free & XFS_LIC_FREEMASK; } +#define XFS_LIC_ALL_FREE(cp) xfs_lic_all_free(cp) static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp) { cp->lic_free = XFS_LIC_FREEMASK; } +#define XFS_LIC_ARE_ALL_FREE(cp) xfs_lic_are_all_free(cp) static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp) { return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK); } +#define XFS_LIC_ISFREE(cp,slot) xfs_lic_isfree(cp,slot) static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot) { return (cp->lic_free & (1 << slot)); } +#define XFS_LIC_CLAIM(cp,slot) xfs_lic_claim(cp,slot) static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot) { cp->lic_free &= ~(1 << slot); } +#define XFS_LIC_RELSE(cp,slot) xfs_lic_relse(cp,slot) static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot) { cp->lic_free |= 1 << slot; } +#define XFS_LIC_SLOT(cp,slot) xfs_lic_slot(cp,slot) static inline xfs_log_item_desc_t * xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot) { return &(cp->lic_descs[slot]); } +#define XFS_LIC_DESC_TO_SLOT(dp) xfs_lic_desc_to_slot(dp) static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp) { return (uint)dp->lid_index; @@ -268,6 +278,7 @@ static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp) * All of this yields the address of the chunk, which is * cast to a chunk pointer. */ +#define XFS_LIC_DESC_TO_CHUNK(dp) xfs_lic_desc_to_chunk(dp) static inline xfs_log_item_chunk_t * xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp) { @@ -975,7 +986,6 @@ int _xfs_trans_commit(xfs_trans_t *, int *); #define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) void xfs_trans_cancel(xfs_trans_t *, int); -int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *); int xfs_trans_ail_init(struct xfs_mount *); void xfs_trans_ail_destroy(struct xfs_mount *); void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); diff --git a/trunk/fs/xfs/xfs_trans_buf.c b/trunk/fs/xfs/xfs_trans_buf.c index 4e855b5ced66..cb0c5839154b 100644 --- a/trunk/fs/xfs/xfs_trans_buf.c +++ b/trunk/fs/xfs/xfs_trans_buf.c @@ -1021,16 +1021,16 @@ xfs_trans_buf_item_match( bp = NULL; len = BBTOB(len); licp = &tp->t_items; - if (!xfs_lic_are_all_free(licp)) { + if (!XFS_LIC_ARE_ALL_FREE(licp)) { for (i = 0; i < licp->lic_unused; i++) { /* * Skip unoccupied slots. */ - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } - lidp = xfs_lic_slot(licp, i); + lidp = XFS_LIC_SLOT(licp, i); blip = (xfs_buf_log_item_t *)lidp->lid_item; if (blip->bli_item.li_type != XFS_LI_BUF) { continue; @@ -1074,7 +1074,7 @@ xfs_trans_buf_item_match_all( bp = NULL; len = BBTOB(len); for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { - if (xfs_lic_are_all_free(licp)) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { ASSERT(licp == &tp->t_items); ASSERT(licp->lic_next == NULL); return NULL; @@ -1083,11 +1083,11 @@ xfs_trans_buf_item_match_all( /* * Skip unoccupied slots. 
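The XFS_LIC_* wrapper macros reinstated above sit over inline helpers that all manipulate lic_free, a bitmask in which a set bit marks a free descriptor slot within a log item chunk. A tiny stand-alone sketch of that bookkeeping, with hypothetical names (struct chunk, SLOT_COUNT) and a slot count chosen only for illustration:

	/* Illustrative free-slot bitmask, mirroring the xfs_lic_* helpers above. */
	#include <stdio.h>

	#define SLOT_COUNT	15
	#define FREEMASK	((1u << SLOT_COUNT) - 1)	/* all slots free */

	struct chunk {
		unsigned int free;	/* bit i set means slot i is free */
	};

	static void chunk_init(struct chunk *cp)          { cp->free = FREEMASK; }
	static int  chunk_isfree(struct chunk *cp, int i) { return cp->free & (1u << i); }
	static void chunk_claim(struct chunk *cp, int i)  { cp->free &= ~(1u << i); }
	static void chunk_relse(struct chunk *cp, int i)  { cp->free |= 1u << i; }

	int main(void)
	{
		struct chunk c;

		chunk_init(&c);
		chunk_claim(&c, 0);
		printf("slot 0 free? %d, slot 1 free? %d\n",
		       chunk_isfree(&c, 0) != 0, chunk_isfree(&c, 1) != 0);
		chunk_relse(&c, 0);
		return 0;
	}
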
*/ - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } - lidp = xfs_lic_slot(licp, i); + lidp = XFS_LIC_SLOT(licp, i); blip = (xfs_buf_log_item_t *)lidp->lid_item; if (blip->bli_item.li_type != XFS_LI_BUF) { continue; diff --git a/trunk/fs/xfs/xfs_trans_item.c b/trunk/fs/xfs/xfs_trans_item.c index 3c666e8317f8..db5c83595526 100644 --- a/trunk/fs/xfs/xfs_trans_item.c +++ b/trunk/fs/xfs/xfs_trans_item.c @@ -53,11 +53,11 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) * Initialize the chunk, and then * claim the first slot in the newly allocated chunk. */ - xfs_lic_init(licp); - xfs_lic_claim(licp, 0); + XFS_LIC_INIT(licp); + XFS_LIC_CLAIM(licp, 0); licp->lic_unused = 1; - xfs_lic_init_slot(licp, 0); - lidp = xfs_lic_slot(licp, 0); + XFS_LIC_INIT_SLOT(licp, 0); + lidp = XFS_LIC_SLOT(licp, 0); /* * Link in the new chunk and update the free count. @@ -88,14 +88,14 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) */ licp = &tp->t_items; while (licp != NULL) { - if (xfs_lic_vacancy(licp)) { + if (XFS_LIC_VACANCY(licp)) { if (licp->lic_unused <= XFS_LIC_MAX_SLOT) { i = licp->lic_unused; - ASSERT(xfs_lic_isfree(licp, i)); + ASSERT(XFS_LIC_ISFREE(licp, i)); break; } for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) { - if (xfs_lic_isfree(licp, i)) + if (XFS_LIC_ISFREE(licp, i)) break; } ASSERT(i <= XFS_LIC_MAX_SLOT); @@ -108,12 +108,12 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) * If we find a free descriptor, claim it, * initialize it, and return it. */ - xfs_lic_claim(licp, i); + XFS_LIC_CLAIM(licp, i); if (licp->lic_unused <= i) { licp->lic_unused = i + 1; - xfs_lic_init_slot(licp, i); + XFS_LIC_INIT_SLOT(licp, i); } - lidp = xfs_lic_slot(licp, i); + lidp = XFS_LIC_SLOT(licp, i); tp->t_items_free--; lidp->lid_item = lip; lidp->lid_flags = 0; @@ -136,9 +136,9 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) xfs_log_item_chunk_t *licp; xfs_log_item_chunk_t **licpp; - slot = xfs_lic_desc_to_slot(lidp); - licp = xfs_lic_desc_to_chunk(lidp); - xfs_lic_relse(licp, slot); + slot = XFS_LIC_DESC_TO_SLOT(lidp); + licp = XFS_LIC_DESC_TO_CHUNK(lidp); + XFS_LIC_RELSE(licp, slot); lidp->lid_item->li_desc = NULL; tp->t_items_free++; @@ -154,7 +154,7 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) * Also decrement the transaction structure's count of free items * by the number in a chunk since we are freeing an empty chunk. */ - if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) { + if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) { licpp = &(tp->t_items.lic_next); while (*licpp != licp) { ASSERT(*licpp != NULL); @@ -207,20 +207,20 @@ xfs_trans_first_item(xfs_trans_t *tp) /* * If it's not in the first chunk, skip to the second. */ - if (xfs_lic_are_all_free(licp)) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { licp = licp->lic_next; } /* * Return the first non-free descriptor in the chunk. */ - ASSERT(!xfs_lic_are_all_free(licp)); + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); for (i = 0; i < licp->lic_unused; i++) { - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } - return xfs_lic_slot(licp, i); + return XFS_LIC_SLOT(licp, i); } cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); return NULL; @@ -242,18 +242,18 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) xfs_log_item_chunk_t *licp; int i; - licp = xfs_lic_desc_to_chunk(lidp); + licp = XFS_LIC_DESC_TO_CHUNK(lidp); /* * First search the rest of the chunk. 
The for loop keeps us * from referencing things beyond the end of the chunk. */ - for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) { - if (xfs_lic_isfree(licp, i)) { + for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } - return xfs_lic_slot(licp, i); + return XFS_LIC_SLOT(licp, i); } /* @@ -266,13 +266,13 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) } licp = licp->lic_next; - ASSERT(!xfs_lic_are_all_free(licp)); + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); for (i = 0; i < licp->lic_unused; i++) { - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } - return xfs_lic_slot(licp, i); + return XFS_LIC_SLOT(licp, i); } ASSERT(0); /* NOTREACHED */ @@ -300,9 +300,9 @@ xfs_trans_free_items( /* * Special case the embedded chunk so we don't free it below. */ - if (!xfs_lic_are_all_free(licp)) { + if (!XFS_LIC_ARE_ALL_FREE(licp)) { (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); - xfs_lic_all_free(licp); + XFS_LIC_ALL_FREE(licp); licp->lic_unused = 0; } licp = licp->lic_next; @@ -311,7 +311,7 @@ xfs_trans_free_items( * Unlock each item in each chunk and free the chunks. */ while (licp != NULL) { - ASSERT(!xfs_lic_are_all_free(licp)); + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); next_licp = licp->lic_next; kmem_free(licp); @@ -347,7 +347,7 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn) /* * Special case the embedded chunk so we don't free. */ - if (!xfs_lic_are_all_free(licp)) { + if (!XFS_LIC_ARE_ALL_FREE(licp)) { freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); } licpp = &(tp->t_items.lic_next); @@ -358,10 +358,10 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn) * and free empty chunks. 
*/ while (licp != NULL) { - ASSERT(!xfs_lic_are_all_free(licp)); + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); next_licp = licp->lic_next; - if (xfs_lic_are_all_free(licp)) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { *licpp = next_licp; kmem_free(licp); freed -= XFS_LIC_NUM_SLOTS; @@ -402,7 +402,7 @@ xfs_trans_unlock_chunk( freed = 0; lidp = licp->lic_descs; for (i = 0; i < licp->lic_unused; i++, lidp++) { - if (xfs_lic_isfree(licp, i)) { + if (XFS_LIC_ISFREE(licp, i)) { continue; } lip = lidp->lid_item; @@ -421,7 +421,7 @@ xfs_trans_unlock_chunk( */ if (!(freeing_chunk) && (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) { - xfs_lic_relse(licp, i); + XFS_LIC_RELSE(licp, i); freed++; } } diff --git a/trunk/fs/xfs/xfs_utils.c b/trunk/fs/xfs/xfs_utils.c index 35d4d414bcc2..98e5f110ba5f 100644 --- a/trunk/fs/xfs/xfs_utils.c +++ b/trunk/fs/xfs/xfs_utils.c @@ -237,7 +237,7 @@ xfs_droplink( ASSERT (ip->i_d.di_nlink > 0); ip->i_d.di_nlink--; - drop_nlink(VFS_I(ip)); + drop_nlink(ip->i_vnode); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = 0; @@ -301,7 +301,7 @@ xfs_bumplink( ASSERT(ip->i_d.di_nlink > 0); ip->i_d.di_nlink++; - inc_nlink(VFS_I(ip)); + inc_nlink(ip->i_vnode); if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && (ip->i_d.di_nlink > XFS_MAXLINK_1)) { /* diff --git a/trunk/fs/xfs/xfs_utils.h b/trunk/fs/xfs/xfs_utils.h index ef321225d269..f316cb85d8e2 100644 --- a/trunk/fs/xfs/xfs_utils.h +++ b/trunk/fs/xfs/xfs_utils.h @@ -18,6 +18,9 @@ #ifndef __XFS_UTILS_H__ #define __XFS_UTILS_H__ +#define IRELE(ip) VN_RELE(XFS_ITOV(ip)) +#define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) + extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, xfs_dev_t, cred_t *, prid_t, int, diff --git a/trunk/fs/xfs/xfs_vfsops.c b/trunk/fs/xfs/xfs_vfsops.c index 439dd3939dda..4a9a43315a86 100644 --- a/trunk/fs/xfs/xfs_vfsops.c +++ b/trunk/fs/xfs/xfs_vfsops.c @@ -128,6 +128,7 @@ xfs_unmount_flush( xfs_inode_t *rip = mp->m_rootip; xfs_inode_t *rbmip; xfs_inode_t *rsumip = NULL; + bhv_vnode_t *rvp = XFS_ITOV(rip); int error; xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); @@ -145,7 +146,7 @@ xfs_unmount_flush( if (error == EFSCORRUPTED) goto fscorrupt_out; - ASSERT(vn_count(VFS_I(rbmip)) == 1); + ASSERT(vn_count(XFS_ITOV(rbmip)) == 1); rsumip = mp->m_rsumip; xfs_ilock(rsumip, XFS_ILOCK_EXCL); @@ -156,7 +157,7 @@ xfs_unmount_flush( if (error == EFSCORRUPTED) goto fscorrupt_out; - ASSERT(vn_count(VFS_I(rsumip)) == 1); + ASSERT(vn_count(XFS_ITOV(rsumip)) == 1); } /* @@ -166,7 +167,7 @@ xfs_unmount_flush( if (error == EFSCORRUPTED) goto fscorrupt_out2; - if (vn_count(VFS_I(rip)) != 1 && !relocation) { + if (vn_count(rvp) != 1 && !relocation) { xfs_iunlock(rip, XFS_ILOCK_EXCL); return XFS_ERROR(EBUSY); } @@ -283,7 +284,7 @@ xfs_sync_inodes( int *bypassed) { xfs_inode_t *ip = NULL; - struct inode *vp = NULL; + bhv_vnode_t *vp = NULL; int error; int last_error; uint64_t fflag; @@ -403,7 +404,7 @@ xfs_sync_inodes( continue; } - vp = VFS_I(ip); + vp = XFS_ITOV_NULL(ip); /* * If the vnode is gone then this is being torn down, @@ -478,7 +479,7 @@ xfs_sync_inodes( IPOINTER_INSERT(ip, mp); xfs_ilock(ip, lock_flags); - ASSERT(vp == VFS_I(ip)); + ASSERT(vp == XFS_ITOV(ip)); ASSERT(ip->i_mount == mp); vnode_refed = B_TRUE; diff --git a/trunk/fs/xfs/xfs_vnodeops.c b/trunk/fs/xfs/xfs_vnodeops.c index aa238c8fbd7a..76a1166af822 100644 --- a/trunk/fs/xfs/xfs_vnodeops.c +++ b/trunk/fs/xfs/xfs_vnodeops.c @@ -83,7 +83,7 
@@ xfs_setattr( cred_t *credp) { xfs_mount_t *mp = ip->i_mount; - struct inode *inode = VFS_I(ip); + struct inode *inode = XFS_ITOV(ip); int mask = iattr->ia_valid; xfs_trans_t *tp; int code; @@ -182,7 +182,7 @@ xfs_setattr( xfs_ilock(ip, lock_flags); /* boolean: are we the file owner? */ - file_owner = (current_fsuid() == ip->i_d.di_uid); + file_owner = (current_fsuid(credp) == ip->i_d.di_uid); /* * Change various properties of a file. @@ -513,6 +513,7 @@ xfs_setattr( ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; ip->i_update_core = 1; + timeflags &= ~XFS_ICHGTIME_ACC; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; @@ -713,7 +714,7 @@ xfs_fsync( return XFS_ERROR(EIO); /* capture size updates in I/O completion before writing the inode. */ - error = filemap_fdatawait(VFS_I(ip)->i_mapping); + error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping); if (error) return XFS_ERROR(error); @@ -1159,6 +1160,7 @@ int xfs_release( xfs_inode_t *ip) { + bhv_vnode_t *vp = XFS_ITOV(ip); xfs_mount_t *mp = ip->i_mount; int error; @@ -1193,13 +1195,13 @@ xfs_release( * be exposed to that problem. */ truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); - if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) + if (truncated && VN_DIRTY(vp) && ip->i_delayed_blks > 0) xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE); } if (ip->i_d.di_nlink != 0) { if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && - ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || + ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || ip->i_delayed_blks > 0)) && (ip->i_df.if_flags & XFS_IFEXTENTS)) && (!(ip->i_d.di_flags & @@ -1225,6 +1227,7 @@ int xfs_inactive( xfs_inode_t *ip) { + bhv_vnode_t *vp = XFS_ITOV(ip); xfs_bmap_free_t free_list; xfs_fsblock_t first_block; int committed; @@ -1239,7 +1242,7 @@ xfs_inactive( * If the inode is already free, then there can be nothing * to clean up here. */ - if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) { + if (ip->i_d.di_mode == 0 || VN_BAD(vp)) { ASSERT(ip->i_df.if_real_bytes == 0); ASSERT(ip->i_df.if_broot_bytes == 0); return VN_INACTIVE_CACHE; @@ -1269,7 +1272,7 @@ xfs_inactive( if (ip->i_d.di_nlink != 0) { if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && - ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || + ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || ip->i_delayed_blks > 0)) && (ip->i_df.if_flags & XFS_IFEXTENTS) && (!(ip->i_d.di_flags & @@ -1533,7 +1536,7 @@ xfs_create( * Make sure that we have allocated dquot(s) on disk. */ error = XFS_QM_DQVOPALLOC(mp, dp, - current_fsuid(), current_fsgid(), prid, + current_fsuid(credp), current_fsgid(credp), prid, XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp); if (error) goto std_return; @@ -1704,6 +1707,111 @@ xfs_create( goto std_return; } +#ifdef DEBUG +/* + * Some counters to see if (and how often) we are hitting some deadlock + * prevention code paths. + */ + +int xfs_rm_locks; +int xfs_rm_lock_delays; +int xfs_rm_attempts; +#endif + +/* + * The following routine will lock the inodes associated with the + * directory and the named entry in the directory. The locks are + * acquired in increasing inode number. + * + * If the entry is "..", then only the directory is locked. The + * vnode ref count will still include that from the .. entry in + * this case. + * + * There is a deadlock we need to worry about. If the locked directory is + * in the AIL, it might be blocking up the log. 
The next inode we lock + * could be already locked by another thread waiting for log space (e.g + * a permanent log reservation with a long running transaction (see + * xfs_itruncate_finish)). To solve this, we must check if the directory + * is in the ail and use lock_nowait. If we can't lock, we need to + * drop the inode lock on the directory and try again. xfs_iunlock will + * potentially push the tail if we were holding up the log. + */ +STATIC int +xfs_lock_dir_and_entry( + xfs_inode_t *dp, + xfs_inode_t *ip) /* inode of entry 'name' */ +{ + int attempts; + xfs_ino_t e_inum; + xfs_inode_t *ips[2]; + xfs_log_item_t *lp; + +#ifdef DEBUG + xfs_rm_locks++; +#endif + attempts = 0; + +again: + xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); + + e_inum = ip->i_ino; + + xfs_itrace_ref(ip); + + /* + * We want to lock in increasing inum. Since we've already + * acquired the lock on the directory, we may need to release + * if if the inum of the entry turns out to be less. + */ + if (e_inum > dp->i_ino) { + /* + * We are already in the right order, so just + * lock on the inode of the entry. + * We need to use nowait if dp is in the AIL. + */ + + lp = (xfs_log_item_t *)dp->i_itemp; + if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { + if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { + attempts++; +#ifdef DEBUG + xfs_rm_attempts++; +#endif + + /* + * Unlock dp and try again. + * xfs_iunlock will try to push the tail + * if the inode is in the AIL. + */ + + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + if ((attempts % 5) == 0) { + delay(1); /* Don't just spin the CPU */ +#ifdef DEBUG + xfs_rm_lock_delays++; +#endif + } + goto again; + } + } else { + xfs_ilock(ip, XFS_ILOCK_EXCL); + } + } else if (e_inum < dp->i_ino) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + ips[0] = ip; + ips[1] = dp; + xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); + } + /* else e_inum == dp->i_ino */ + /* This can happen if we're asked to lock /x/.. + * the entry is "..", which is also the parent directory. + */ + + return 0; +} + #ifdef DEBUG int xfs_locked_n; int xfs_small_retries; @@ -1838,45 +1946,6 @@ xfs_lock_inodes( #endif } -void -xfs_lock_two_inodes( - xfs_inode_t *ip0, - xfs_inode_t *ip1, - uint lock_mode) -{ - xfs_inode_t *temp; - int attempts = 0; - xfs_log_item_t *lp; - - ASSERT(ip0->i_ino != ip1->i_ino); - - if (ip0->i_ino > ip1->i_ino) { - temp = ip0; - ip0 = ip1; - ip1 = temp; - } - - again: - xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0)); - - /* - * If the first lock we have locked is in the AIL, we must TRY to get - * the second lock. If we can't get it, we must release the first one - * and try again. - */ - lp = (xfs_log_item_t *)ip0->i_itemp; - if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { - if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) { - xfs_iunlock(ip0, lock_mode); - if ((++attempts % 5) == 0) - delay(1); /* Don't just spin the CPU */ - goto again; - } - } else { - xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1)); - } -} - int xfs_remove( xfs_inode_t *dp, @@ -1949,7 +2018,9 @@ xfs_remove( goto out_trans_cancel; } - xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL); + error = xfs_lock_dir_and_entry(dp, ip); + if (error) + goto out_trans_cancel; /* * At this point, we've gotten both the directory and the entry @@ -1976,6 +2047,9 @@ xfs_remove( } } + /* + * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. 
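The xfs_lock_dir_and_entry() routine reintroduced above encodes a common deadlock-avoidance shape: always lock the inode with the smaller inode number first, and when the lock already held might be pinned in the AIL, only trylock the second inode, dropping the first and retrying (with an occasional delay) on failure. A minimal userspace sketch of that retry pattern, assuming POSIX threads and using invented names (struct resource, lock_pair):

	/*
	 * Illustrative sketch only, not from the patch: take two locks in a
	 * fixed order (smallest id first); if the second cannot be taken
	 * immediately, drop the first and retry, sleeping now and then so
	 * we do not spin the CPU.
	 */
	#include <assert.h>
	#include <pthread.h>
	#include <unistd.h>

	struct resource {
		unsigned long	id;	/* plays the role of the inode number */
		pthread_mutex_t	lock;
	};

	static void lock_pair(struct resource *a, struct resource *b)
	{
		struct resource *first  = (a->id < b->id) ? a : b;
		struct resource *second = (a->id < b->id) ? b : a;
		int attempts = 0;

		assert(a->id != b->id);
	again:
		pthread_mutex_lock(&first->lock);
		if (pthread_mutex_trylock(&second->lock) != 0) {
			/* Could not get the second lock: back off and retry. */
			pthread_mutex_unlock(&first->lock);
			if ((++attempts % 5) == 0)
				usleep(1000);	/* don't just spin the CPU */
			goto again;
		}
		/* Both locks are now held, in a globally consistent order. */
	}

Ordering by a stable id is what keeps the fast path deadlock-free; the trylock-and-retry branch exists only because the lock already held may be blocking log progress, exactly the situation the comment above describes.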
+ */ XFS_BMAP_INIT(&free_list, &first_block); error = xfs_dir_removename(tp, dp, name, ip->i_ino, &first_block, &free_list, resblks); @@ -2081,6 +2155,7 @@ xfs_link( { xfs_mount_t *mp = tdp->i_mount; xfs_trans_t *tp; + xfs_inode_t *ips[2]; int error; xfs_bmap_free_t free_list; xfs_fsblock_t first_block; @@ -2128,7 +2203,15 @@ xfs_link( goto error_return; } - xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL); + if (sip->i_ino < tdp->i_ino) { + ips[0] = sip; + ips[1] = tdp; + } else { + ips[0] = tdp; + ips[1] = sip; + } + + xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); /* * Increment vnode ref counts since xfs_trans_commit & @@ -2269,7 +2352,7 @@ xfs_mkdir( * Make sure that we have allocated dquot(s) on disk. */ error = XFS_QM_DQVOPALLOC(mp, dp, - current_fsuid(), current_fsgid(), prid, + current_fsuid(credp), current_fsgid(credp), prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); if (error) goto std_return; @@ -2495,7 +2578,7 @@ xfs_symlink( * Make sure that we have allocated dquot(s) on disk. */ error = XFS_QM_DQVOPALLOC(mp, dp, - current_fsuid(), current_fsgid(), prid, + current_fsuid(credp), current_fsgid(credp), prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); if (error) goto std_return; @@ -2790,13 +2873,14 @@ int xfs_reclaim( xfs_inode_t *ip) { + bhv_vnode_t *vp = XFS_ITOV(ip); xfs_itrace_entry(ip); - ASSERT(!VN_MAPPED(VFS_I(ip))); + ASSERT(!VN_MAPPED(vp)); /* bad inode, get out here ASAP */ - if (VN_BAD(VFS_I(ip))) { + if (VN_BAD(vp)) { xfs_ireclaim(ip); return 0; } @@ -2833,7 +2917,7 @@ xfs_reclaim( XFS_MOUNT_ILOCK(mp); spin_lock(&ip->i_flags_lock); __xfs_iflags_set(ip, XFS_IRECLAIMABLE); - VFS_I(ip)->i_private = NULL; + vn_to_inode(vp)->i_private = NULL; ip->i_vnode = NULL; spin_unlock(&ip->i_flags_lock); list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); @@ -2849,7 +2933,7 @@ xfs_finish_reclaim( int sync_mode) { xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); - struct inode *vp = VFS_I(ip); + bhv_vnode_t *vp = XFS_ITOV_NULL(ip); if (vp && VN_BAD(vp)) goto reclaim; @@ -3237,6 +3321,7 @@ xfs_free_file_space( xfs_off_t len, int attr_flags) { + bhv_vnode_t *vp; int committed; int done; xfs_off_t end_dmi_offset; @@ -3256,6 +3341,7 @@ xfs_free_file_space( xfs_trans_t *tp; int need_iolock = 1; + vp = XFS_ITOV(ip); mp = ip->i_mount; xfs_itrace_entry(ip); @@ -3292,7 +3378,7 @@ xfs_free_file_space( rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); ioffset = offset & ~(rounding - 1); - if (VN_CACHED(VFS_I(ip)) != 0) { + if (VN_CACHED(vp) != 0) { xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); if (error) diff --git a/trunk/arch/h8300/include/asm/Kbuild b/trunk/include/asm-h8300/Kbuild similarity index 100% rename from trunk/arch/h8300/include/asm/Kbuild rename to trunk/include/asm-h8300/Kbuild diff --git a/trunk/arch/h8300/include/asm/a.out.h b/trunk/include/asm-h8300/a.out.h similarity index 100% rename from trunk/arch/h8300/include/asm/a.out.h rename to trunk/include/asm-h8300/a.out.h diff --git a/trunk/arch/h8300/include/asm/atomic.h b/trunk/include/asm-h8300/atomic.h similarity index 100% rename from trunk/arch/h8300/include/asm/atomic.h rename to trunk/include/asm-h8300/atomic.h diff --git a/trunk/arch/h8300/include/asm/auxvec.h b/trunk/include/asm-h8300/auxvec.h similarity index 100% rename from trunk/arch/h8300/include/asm/auxvec.h rename to trunk/include/asm-h8300/auxvec.h diff --git a/trunk/arch/h8300/include/asm/bitops.h b/trunk/include/asm-h8300/bitops.h similarity index 
100% rename from trunk/arch/h8300/include/asm/bitops.h rename to trunk/include/asm-h8300/bitops.h diff --git a/trunk/arch/h8300/include/asm/bootinfo.h b/trunk/include/asm-h8300/bootinfo.h similarity index 100% rename from trunk/arch/h8300/include/asm/bootinfo.h rename to trunk/include/asm-h8300/bootinfo.h diff --git a/trunk/arch/h8300/include/asm/bug.h b/trunk/include/asm-h8300/bug.h similarity index 100% rename from trunk/arch/h8300/include/asm/bug.h rename to trunk/include/asm-h8300/bug.h diff --git a/trunk/arch/h8300/include/asm/bugs.h b/trunk/include/asm-h8300/bugs.h similarity index 100% rename from trunk/arch/h8300/include/asm/bugs.h rename to trunk/include/asm-h8300/bugs.h diff --git a/trunk/arch/h8300/include/asm/byteorder.h b/trunk/include/asm-h8300/byteorder.h similarity index 100% rename from trunk/arch/h8300/include/asm/byteorder.h rename to trunk/include/asm-h8300/byteorder.h diff --git a/trunk/arch/h8300/include/asm/cache.h b/trunk/include/asm-h8300/cache.h similarity index 100% rename from trunk/arch/h8300/include/asm/cache.h rename to trunk/include/asm-h8300/cache.h diff --git a/trunk/arch/h8300/include/asm/cachectl.h b/trunk/include/asm-h8300/cachectl.h similarity index 100% rename from trunk/arch/h8300/include/asm/cachectl.h rename to trunk/include/asm-h8300/cachectl.h diff --git a/trunk/arch/h8300/include/asm/cacheflush.h b/trunk/include/asm-h8300/cacheflush.h similarity index 100% rename from trunk/arch/h8300/include/asm/cacheflush.h rename to trunk/include/asm-h8300/cacheflush.h diff --git a/trunk/arch/h8300/include/asm/checksum.h b/trunk/include/asm-h8300/checksum.h similarity index 100% rename from trunk/arch/h8300/include/asm/checksum.h rename to trunk/include/asm-h8300/checksum.h diff --git a/trunk/arch/h8300/include/asm/cputime.h b/trunk/include/asm-h8300/cputime.h similarity index 100% rename from trunk/arch/h8300/include/asm/cputime.h rename to trunk/include/asm-h8300/cputime.h diff --git a/trunk/arch/h8300/include/asm/current.h b/trunk/include/asm-h8300/current.h similarity index 100% rename from trunk/arch/h8300/include/asm/current.h rename to trunk/include/asm-h8300/current.h diff --git a/trunk/arch/h8300/include/asm/dbg.h b/trunk/include/asm-h8300/dbg.h similarity index 100% rename from trunk/arch/h8300/include/asm/dbg.h rename to trunk/include/asm-h8300/dbg.h diff --git a/trunk/arch/h8300/include/asm/delay.h b/trunk/include/asm-h8300/delay.h similarity index 100% rename from trunk/arch/h8300/include/asm/delay.h rename to trunk/include/asm-h8300/delay.h diff --git a/trunk/arch/h8300/include/asm/device.h b/trunk/include/asm-h8300/device.h similarity index 100% rename from trunk/arch/h8300/include/asm/device.h rename to trunk/include/asm-h8300/device.h diff --git a/trunk/arch/h8300/include/asm/div64.h b/trunk/include/asm-h8300/div64.h similarity index 100% rename from trunk/arch/h8300/include/asm/div64.h rename to trunk/include/asm-h8300/div64.h diff --git a/trunk/arch/h8300/include/asm/dma.h b/trunk/include/asm-h8300/dma.h similarity index 100% rename from trunk/arch/h8300/include/asm/dma.h rename to trunk/include/asm-h8300/dma.h diff --git a/trunk/arch/h8300/include/asm/elf.h b/trunk/include/asm-h8300/elf.h similarity index 100% rename from trunk/arch/h8300/include/asm/elf.h rename to trunk/include/asm-h8300/elf.h diff --git a/trunk/arch/h8300/include/asm/emergency-restart.h b/trunk/include/asm-h8300/emergency-restart.h similarity index 100% rename from trunk/arch/h8300/include/asm/emergency-restart.h rename to trunk/include/asm-h8300/emergency-restart.h 
diff --git a/trunk/arch/h8300/include/asm/errno.h b/trunk/include/asm-h8300/errno.h similarity index 100% rename from trunk/arch/h8300/include/asm/errno.h rename to trunk/include/asm-h8300/errno.h diff --git a/trunk/arch/h8300/include/asm/fb.h b/trunk/include/asm-h8300/fb.h similarity index 100% rename from trunk/arch/h8300/include/asm/fb.h rename to trunk/include/asm-h8300/fb.h diff --git a/trunk/arch/h8300/include/asm/fcntl.h b/trunk/include/asm-h8300/fcntl.h similarity index 100% rename from trunk/arch/h8300/include/asm/fcntl.h rename to trunk/include/asm-h8300/fcntl.h diff --git a/trunk/arch/h8300/include/asm/flat.h b/trunk/include/asm-h8300/flat.h similarity index 100% rename from trunk/arch/h8300/include/asm/flat.h rename to trunk/include/asm-h8300/flat.h diff --git a/trunk/arch/h8300/include/asm/fpu.h b/trunk/include/asm-h8300/fpu.h similarity index 100% rename from trunk/arch/h8300/include/asm/fpu.h rename to trunk/include/asm-h8300/fpu.h diff --git a/trunk/arch/h8300/include/asm/futex.h b/trunk/include/asm-h8300/futex.h similarity index 100% rename from trunk/arch/h8300/include/asm/futex.h rename to trunk/include/asm-h8300/futex.h diff --git a/trunk/arch/h8300/include/asm/gpio.h b/trunk/include/asm-h8300/gpio.h similarity index 100% rename from trunk/arch/h8300/include/asm/gpio.h rename to trunk/include/asm-h8300/gpio.h diff --git a/trunk/arch/h8300/include/asm/hardirq.h b/trunk/include/asm-h8300/hardirq.h similarity index 100% rename from trunk/arch/h8300/include/asm/hardirq.h rename to trunk/include/asm-h8300/hardirq.h diff --git a/trunk/arch/h8300/include/asm/hw_irq.h b/trunk/include/asm-h8300/hw_irq.h similarity index 100% rename from trunk/arch/h8300/include/asm/hw_irq.h rename to trunk/include/asm-h8300/hw_irq.h diff --git a/trunk/arch/h8300/include/asm/io.h b/trunk/include/asm-h8300/io.h similarity index 100% rename from trunk/arch/h8300/include/asm/io.h rename to trunk/include/asm-h8300/io.h diff --git a/trunk/arch/h8300/include/asm/ioctl.h b/trunk/include/asm-h8300/ioctl.h similarity index 100% rename from trunk/arch/h8300/include/asm/ioctl.h rename to trunk/include/asm-h8300/ioctl.h diff --git a/trunk/arch/h8300/include/asm/ioctls.h b/trunk/include/asm-h8300/ioctls.h similarity index 100% rename from trunk/arch/h8300/include/asm/ioctls.h rename to trunk/include/asm-h8300/ioctls.h diff --git a/trunk/arch/h8300/include/asm/ipcbuf.h b/trunk/include/asm-h8300/ipcbuf.h similarity index 100% rename from trunk/arch/h8300/include/asm/ipcbuf.h rename to trunk/include/asm-h8300/ipcbuf.h diff --git a/trunk/arch/h8300/include/asm/irq.h b/trunk/include/asm-h8300/irq.h similarity index 100% rename from trunk/arch/h8300/include/asm/irq.h rename to trunk/include/asm-h8300/irq.h diff --git a/trunk/arch/h8300/include/asm/irq_regs.h b/trunk/include/asm-h8300/irq_regs.h similarity index 100% rename from trunk/arch/h8300/include/asm/irq_regs.h rename to trunk/include/asm-h8300/irq_regs.h diff --git a/trunk/arch/h8300/include/asm/kdebug.h b/trunk/include/asm-h8300/kdebug.h similarity index 100% rename from trunk/arch/h8300/include/asm/kdebug.h rename to trunk/include/asm-h8300/kdebug.h diff --git a/trunk/arch/h8300/include/asm/kmap_types.h b/trunk/include/asm-h8300/kmap_types.h similarity index 100% rename from trunk/arch/h8300/include/asm/kmap_types.h rename to trunk/include/asm-h8300/kmap_types.h diff --git a/trunk/arch/h8300/include/asm/linkage.h b/trunk/include/asm-h8300/linkage.h similarity index 100% rename from trunk/arch/h8300/include/asm/linkage.h rename to 
trunk/include/asm-h8300/linkage.h diff --git a/trunk/arch/h8300/include/asm/local.h b/trunk/include/asm-h8300/local.h similarity index 100% rename from trunk/arch/h8300/include/asm/local.h rename to trunk/include/asm-h8300/local.h diff --git a/trunk/arch/h8300/include/asm/mc146818rtc.h b/trunk/include/asm-h8300/mc146818rtc.h similarity index 100% rename from trunk/arch/h8300/include/asm/mc146818rtc.h rename to trunk/include/asm-h8300/mc146818rtc.h diff --git a/trunk/arch/h8300/include/asm/md.h b/trunk/include/asm-h8300/md.h similarity index 100% rename from trunk/arch/h8300/include/asm/md.h rename to trunk/include/asm-h8300/md.h diff --git a/trunk/arch/h8300/include/asm/mman.h b/trunk/include/asm-h8300/mman.h similarity index 100% rename from trunk/arch/h8300/include/asm/mman.h rename to trunk/include/asm-h8300/mman.h diff --git a/trunk/arch/h8300/include/asm/mmu.h b/trunk/include/asm-h8300/mmu.h similarity index 100% rename from trunk/arch/h8300/include/asm/mmu.h rename to trunk/include/asm-h8300/mmu.h diff --git a/trunk/arch/h8300/include/asm/mmu_context.h b/trunk/include/asm-h8300/mmu_context.h similarity index 100% rename from trunk/arch/h8300/include/asm/mmu_context.h rename to trunk/include/asm-h8300/mmu_context.h diff --git a/trunk/arch/h8300/include/asm/module.h b/trunk/include/asm-h8300/module.h similarity index 100% rename from trunk/arch/h8300/include/asm/module.h rename to trunk/include/asm-h8300/module.h diff --git a/trunk/arch/h8300/include/asm/msgbuf.h b/trunk/include/asm-h8300/msgbuf.h similarity index 100% rename from trunk/arch/h8300/include/asm/msgbuf.h rename to trunk/include/asm-h8300/msgbuf.h diff --git a/trunk/arch/h8300/include/asm/mutex.h b/trunk/include/asm-h8300/mutex.h similarity index 100% rename from trunk/arch/h8300/include/asm/mutex.h rename to trunk/include/asm-h8300/mutex.h diff --git a/trunk/arch/h8300/include/asm/page.h b/trunk/include/asm-h8300/page.h similarity index 100% rename from trunk/arch/h8300/include/asm/page.h rename to trunk/include/asm-h8300/page.h diff --git a/trunk/arch/h8300/include/asm/page_offset.h b/trunk/include/asm-h8300/page_offset.h similarity index 100% rename from trunk/arch/h8300/include/asm/page_offset.h rename to trunk/include/asm-h8300/page_offset.h diff --git a/trunk/arch/h8300/include/asm/param.h b/trunk/include/asm-h8300/param.h similarity index 100% rename from trunk/arch/h8300/include/asm/param.h rename to trunk/include/asm-h8300/param.h diff --git a/trunk/arch/h8300/include/asm/pci.h b/trunk/include/asm-h8300/pci.h similarity index 100% rename from trunk/arch/h8300/include/asm/pci.h rename to trunk/include/asm-h8300/pci.h diff --git a/trunk/arch/h8300/include/asm/percpu.h b/trunk/include/asm-h8300/percpu.h similarity index 100% rename from trunk/arch/h8300/include/asm/percpu.h rename to trunk/include/asm-h8300/percpu.h diff --git a/trunk/arch/h8300/include/asm/pgalloc.h b/trunk/include/asm-h8300/pgalloc.h similarity index 100% rename from trunk/arch/h8300/include/asm/pgalloc.h rename to trunk/include/asm-h8300/pgalloc.h diff --git a/trunk/arch/h8300/include/asm/pgtable.h b/trunk/include/asm-h8300/pgtable.h similarity index 100% rename from trunk/arch/h8300/include/asm/pgtable.h rename to trunk/include/asm-h8300/pgtable.h diff --git a/trunk/arch/h8300/include/asm/poll.h b/trunk/include/asm-h8300/poll.h similarity index 100% rename from trunk/arch/h8300/include/asm/poll.h rename to trunk/include/asm-h8300/poll.h diff --git a/trunk/arch/h8300/include/asm/posix_types.h b/trunk/include/asm-h8300/posix_types.h similarity 
index 100% rename from trunk/arch/h8300/include/asm/posix_types.h rename to trunk/include/asm-h8300/posix_types.h diff --git a/trunk/arch/h8300/include/asm/processor.h b/trunk/include/asm-h8300/processor.h similarity index 100% rename from trunk/arch/h8300/include/asm/processor.h rename to trunk/include/asm-h8300/processor.h diff --git a/trunk/arch/h8300/include/asm/ptrace.h b/trunk/include/asm-h8300/ptrace.h similarity index 100% rename from trunk/arch/h8300/include/asm/ptrace.h rename to trunk/include/asm-h8300/ptrace.h diff --git a/trunk/arch/h8300/include/asm/regs267x.h b/trunk/include/asm-h8300/regs267x.h similarity index 100% rename from trunk/arch/h8300/include/asm/regs267x.h rename to trunk/include/asm-h8300/regs267x.h diff --git a/trunk/arch/h8300/include/asm/regs306x.h b/trunk/include/asm-h8300/regs306x.h similarity index 100% rename from trunk/arch/h8300/include/asm/regs306x.h rename to trunk/include/asm-h8300/regs306x.h diff --git a/trunk/arch/h8300/include/asm/resource.h b/trunk/include/asm-h8300/resource.h similarity index 100% rename from trunk/arch/h8300/include/asm/resource.h rename to trunk/include/asm-h8300/resource.h diff --git a/trunk/arch/h8300/include/asm/scatterlist.h b/trunk/include/asm-h8300/scatterlist.h similarity index 100% rename from trunk/arch/h8300/include/asm/scatterlist.h rename to trunk/include/asm-h8300/scatterlist.h diff --git a/trunk/arch/h8300/include/asm/sections.h b/trunk/include/asm-h8300/sections.h similarity index 100% rename from trunk/arch/h8300/include/asm/sections.h rename to trunk/include/asm-h8300/sections.h diff --git a/trunk/arch/h8300/include/asm/segment.h b/trunk/include/asm-h8300/segment.h similarity index 100% rename from trunk/arch/h8300/include/asm/segment.h rename to trunk/include/asm-h8300/segment.h diff --git a/trunk/arch/h8300/include/asm/sembuf.h b/trunk/include/asm-h8300/sembuf.h similarity index 100% rename from trunk/arch/h8300/include/asm/sembuf.h rename to trunk/include/asm-h8300/sembuf.h diff --git a/trunk/arch/h8300/include/asm/setup.h b/trunk/include/asm-h8300/setup.h similarity index 100% rename from trunk/arch/h8300/include/asm/setup.h rename to trunk/include/asm-h8300/setup.h diff --git a/trunk/arch/h8300/include/asm/sh_bios.h b/trunk/include/asm-h8300/sh_bios.h similarity index 100% rename from trunk/arch/h8300/include/asm/sh_bios.h rename to trunk/include/asm-h8300/sh_bios.h diff --git a/trunk/arch/h8300/include/asm/shm.h b/trunk/include/asm-h8300/shm.h similarity index 100% rename from trunk/arch/h8300/include/asm/shm.h rename to trunk/include/asm-h8300/shm.h diff --git a/trunk/arch/h8300/include/asm/shmbuf.h b/trunk/include/asm-h8300/shmbuf.h similarity index 100% rename from trunk/arch/h8300/include/asm/shmbuf.h rename to trunk/include/asm-h8300/shmbuf.h diff --git a/trunk/arch/h8300/include/asm/shmparam.h b/trunk/include/asm-h8300/shmparam.h similarity index 100% rename from trunk/arch/h8300/include/asm/shmparam.h rename to trunk/include/asm-h8300/shmparam.h diff --git a/trunk/arch/h8300/include/asm/sigcontext.h b/trunk/include/asm-h8300/sigcontext.h similarity index 100% rename from trunk/arch/h8300/include/asm/sigcontext.h rename to trunk/include/asm-h8300/sigcontext.h diff --git a/trunk/arch/h8300/include/asm/siginfo.h b/trunk/include/asm-h8300/siginfo.h similarity index 100% rename from trunk/arch/h8300/include/asm/siginfo.h rename to trunk/include/asm-h8300/siginfo.h diff --git a/trunk/arch/h8300/include/asm/signal.h b/trunk/include/asm-h8300/signal.h similarity index 100% rename from 
trunk/arch/h8300/include/asm/signal.h rename to trunk/include/asm-h8300/signal.h diff --git a/trunk/arch/h8300/include/asm/smp.h b/trunk/include/asm-h8300/smp.h similarity index 100% rename from trunk/arch/h8300/include/asm/smp.h rename to trunk/include/asm-h8300/smp.h diff --git a/trunk/arch/h8300/include/asm/socket.h b/trunk/include/asm-h8300/socket.h similarity index 100% rename from trunk/arch/h8300/include/asm/socket.h rename to trunk/include/asm-h8300/socket.h diff --git a/trunk/arch/h8300/include/asm/sockios.h b/trunk/include/asm-h8300/sockios.h similarity index 100% rename from trunk/arch/h8300/include/asm/sockios.h rename to trunk/include/asm-h8300/sockios.h diff --git a/trunk/arch/h8300/include/asm/spinlock.h b/trunk/include/asm-h8300/spinlock.h similarity index 100% rename from trunk/arch/h8300/include/asm/spinlock.h rename to trunk/include/asm-h8300/spinlock.h diff --git a/trunk/arch/h8300/include/asm/stat.h b/trunk/include/asm-h8300/stat.h similarity index 100% rename from trunk/arch/h8300/include/asm/stat.h rename to trunk/include/asm-h8300/stat.h diff --git a/trunk/arch/h8300/include/asm/statfs.h b/trunk/include/asm-h8300/statfs.h similarity index 100% rename from trunk/arch/h8300/include/asm/statfs.h rename to trunk/include/asm-h8300/statfs.h diff --git a/trunk/arch/h8300/include/asm/string.h b/trunk/include/asm-h8300/string.h similarity index 100% rename from trunk/arch/h8300/include/asm/string.h rename to trunk/include/asm-h8300/string.h diff --git a/trunk/arch/h8300/include/asm/system.h b/trunk/include/asm-h8300/system.h similarity index 100% rename from trunk/arch/h8300/include/asm/system.h rename to trunk/include/asm-h8300/system.h diff --git a/trunk/arch/h8300/include/asm/target_time.h b/trunk/include/asm-h8300/target_time.h similarity index 100% rename from trunk/arch/h8300/include/asm/target_time.h rename to trunk/include/asm-h8300/target_time.h diff --git a/trunk/arch/h8300/include/asm/termbits.h b/trunk/include/asm-h8300/termbits.h similarity index 100% rename from trunk/arch/h8300/include/asm/termbits.h rename to trunk/include/asm-h8300/termbits.h diff --git a/trunk/arch/h8300/include/asm/termios.h b/trunk/include/asm-h8300/termios.h similarity index 100% rename from trunk/arch/h8300/include/asm/termios.h rename to trunk/include/asm-h8300/termios.h diff --git a/trunk/arch/h8300/include/asm/thread_info.h b/trunk/include/asm-h8300/thread_info.h similarity index 100% rename from trunk/arch/h8300/include/asm/thread_info.h rename to trunk/include/asm-h8300/thread_info.h diff --git a/trunk/arch/h8300/include/asm/timex.h b/trunk/include/asm-h8300/timex.h similarity index 100% rename from trunk/arch/h8300/include/asm/timex.h rename to trunk/include/asm-h8300/timex.h diff --git a/trunk/arch/h8300/include/asm/tlb.h b/trunk/include/asm-h8300/tlb.h similarity index 100% rename from trunk/arch/h8300/include/asm/tlb.h rename to trunk/include/asm-h8300/tlb.h diff --git a/trunk/arch/h8300/include/asm/tlbflush.h b/trunk/include/asm-h8300/tlbflush.h similarity index 100% rename from trunk/arch/h8300/include/asm/tlbflush.h rename to trunk/include/asm-h8300/tlbflush.h diff --git a/trunk/arch/h8300/include/asm/topology.h b/trunk/include/asm-h8300/topology.h similarity index 100% rename from trunk/arch/h8300/include/asm/topology.h rename to trunk/include/asm-h8300/topology.h diff --git a/trunk/arch/h8300/include/asm/traps.h b/trunk/include/asm-h8300/traps.h similarity index 100% rename from trunk/arch/h8300/include/asm/traps.h rename to trunk/include/asm-h8300/traps.h diff --git 
a/trunk/arch/h8300/include/asm/types.h b/trunk/include/asm-h8300/types.h similarity index 100% rename from trunk/arch/h8300/include/asm/types.h rename to trunk/include/asm-h8300/types.h diff --git a/trunk/arch/h8300/include/asm/uaccess.h b/trunk/include/asm-h8300/uaccess.h similarity index 100% rename from trunk/arch/h8300/include/asm/uaccess.h rename to trunk/include/asm-h8300/uaccess.h diff --git a/trunk/arch/h8300/include/asm/ucontext.h b/trunk/include/asm-h8300/ucontext.h similarity index 100% rename from trunk/arch/h8300/include/asm/ucontext.h rename to trunk/include/asm-h8300/ucontext.h diff --git a/trunk/arch/h8300/include/asm/unaligned.h b/trunk/include/asm-h8300/unaligned.h similarity index 100% rename from trunk/arch/h8300/include/asm/unaligned.h rename to trunk/include/asm-h8300/unaligned.h diff --git a/trunk/arch/h8300/include/asm/unistd.h b/trunk/include/asm-h8300/unistd.h similarity index 100% rename from trunk/arch/h8300/include/asm/unistd.h rename to trunk/include/asm-h8300/unistd.h diff --git a/trunk/arch/h8300/include/asm/user.h b/trunk/include/asm-h8300/user.h similarity index 100% rename from trunk/arch/h8300/include/asm/user.h rename to trunk/include/asm-h8300/user.h diff --git a/trunk/arch/h8300/include/asm/virtconvert.h b/trunk/include/asm-h8300/virtconvert.h similarity index 100% rename from trunk/arch/h8300/include/asm/virtconvert.h rename to trunk/include/asm-h8300/virtconvert.h diff --git a/trunk/include/asm-x86/i387.h b/trunk/include/asm-x86/i387.h index 6d3b21063419..96fa8449ff11 100644 --- a/trunk/include/asm-x86/i387.h +++ b/trunk/include/asm-x86/i387.h @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -237,37 +236,6 @@ static inline void kernel_fpu_end(void) preempt_enable(); } -/* - * Some instructions like VIA's padlock instructions generate a spurious - * DNA fault but don't modify SSE registers. And these instructions - * get used from interrupt context aswell. To prevent these kernel instructions - * in interrupt context interact wrongly with other user/kernel fpu usage, we - * should use them only in the context of irq_ts_save/restore() - */ -static inline int irq_ts_save(void) -{ - /* - * If we are in process context, we are ok to take a spurious DNA fault. 
- * Otherwise, doing clts() in process context require pre-emption to - * be disabled or some heavy lifting like kernel_fpu_begin() - */ - if (!in_interrupt()) - return 0; - - if (read_cr0() & X86_CR0_TS) { - clts(); - return 1; - } - - return 0; -} - -static inline void irq_ts_restore(int TS_state) -{ - if (TS_state) - stts(); -} - #ifdef CONFIG_X86_64 static inline void save_init_fpu(struct task_struct *tsk) diff --git a/trunk/include/crypto/hash.h b/trunk/include/crypto/hash.h index ee48ef8fb2ea..d12498ec8a4e 100644 --- a/trunk/include/crypto/hash.h +++ b/trunk/include/crypto/hash.h @@ -101,24 +101,6 @@ static inline int crypto_ahash_digest(struct ahash_request *req) return crt->digest(req); } -static inline int crypto_ahash_init(struct ahash_request *req) -{ - struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); - return crt->init(req); -} - -static inline int crypto_ahash_update(struct ahash_request *req) -{ - struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); - return crt->update(req); -} - -static inline int crypto_ahash_final(struct ahash_request *req) -{ - struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); - return crt->final(req); -} - static inline void ahash_request_set_tfm(struct ahash_request *req, struct crypto_ahash *tfm) { diff --git a/trunk/include/linux/completion.h b/trunk/include/linux/completion.h index 57faa60de9bd..d2961b66d53d 100644 --- a/trunk/include/linux/completion.h +++ b/trunk/include/linux/completion.h @@ -55,49 +55,4 @@ extern void complete_all(struct completion *); #define INIT_COMPLETION(x) ((x).done = 0) - -/** - * try_wait_for_completion - try to decrement a completion without blocking - * @x: completion structure - * - * Returns: 0 if a decrement cannot be done without blocking - * 1 if a decrement succeeded. - * - * If a completion is being used as a counting completion, - * attempt to decrement the counter without blocking. This - * enables us to avoid waiting if the resource the completion - * is protecting is not available. - */ -static inline bool try_wait_for_completion(struct completion *x) -{ - int ret = 1; - - spin_lock_irq(&x->wait.lock); - if (!x->done) - ret = 0; - else - x->done--; - spin_unlock_irq(&x->wait.lock); - return ret; -} - -/** - * completion_done - Test to see if a completion has any waiters - * @x: completion structure - * - * Returns: 0 if there are waiters (wait_for_completion() in progress) - * 1 if there are no waiters. - * - */ -static inline bool completion_done(struct completion *x) -{ - int ret = 1; - - spin_lock_irq(&x->wait.lock); - if (!x->done) - ret = 0; - spin_unlock_irq(&x->wait.lock); - return ret; -} - #endif diff --git a/trunk/include/linux/cred.h b/trunk/include/linux/cred.h deleted file mode 100644 index b69222cc1fd2..000000000000 --- a/trunk/include/linux/cred.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Credentials management - * - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#ifndef _LINUX_CRED_H -#define _LINUX_CRED_H - -#define get_current_user() (get_uid(current->user)) - -#define task_uid(task) ((task)->uid) -#define task_gid(task) ((task)->gid) -#define task_euid(task) ((task)->euid) -#define task_egid(task) ((task)->egid) - -#define current_uid() (current->uid) -#define current_gid() (current->gid) -#define current_euid() (current->euid) -#define current_egid() (current->egid) -#define current_suid() (current->suid) -#define current_sgid() (current->sgid) -#define current_fsuid() (current->fsuid) -#define current_fsgid() (current->fsgid) -#define current_cap() (current->cap_effective) - -#define current_uid_gid(_uid, _gid) \ -do { \ - *(_uid) = current->uid; \ - *(_gid) = current->gid; \ -} while(0) - -#define current_euid_egid(_uid, _gid) \ -do { \ - *(_uid) = current->euid; \ - *(_gid) = current->egid; \ -} while(0) - -#define current_fsuid_fsgid(_uid, _gid) \ -do { \ - *(_uid) = current->fsuid; \ - *(_gid) = current->fsgid; \ -} while(0) - -#endif /* _LINUX_CRED_H */ diff --git a/trunk/include/linux/if_tun.h b/trunk/include/linux/if_tun.h index 8529f57ba263..4c6307ad9fdb 100644 --- a/trunk/include/linux/if_tun.h +++ b/trunk/include/linux/if_tun.h @@ -45,7 +45,6 @@ #define TUNGETFEATURES _IOR('T', 207, unsigned int) #define TUNSETOFFLOAD _IOW('T', 208, unsigned int) #define TUNSETTXFILTER _IOW('T', 209, unsigned int) -#define TUNGETIFF _IOR('T', 210, unsigned int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index cfb0d87b99fc..5850bfb968a8 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -87,7 +87,6 @@ struct sched_param { #include #include #include -#include #include diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h index 909923717830..cfcc45b3bef0 100644 --- a/trunk/include/linux/skbuff.h +++ b/trunk/include/linux/skbuff.h @@ -901,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && - !__pskb_pull_tail(skb, len - skb_headlen(skb))) + !__pskb_pull_tail(skb, len-skb_headlen(skb))) return NULL; skb->len -= len; return skb->data += len; @@ -918,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return 1; if (unlikely(len > skb->len)) return 0; - return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; + return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; } /** @@ -1321,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) unsigned int size = skb->len; if (likely(size >= len)) return 0; - return skb_pad(skb, len - size); + return skb_pad(skb, len-size); } static inline int skb_add_data(struct sk_buff *skb, @@ -1452,10 +1452,6 @@ extern int skb_copy_datagram_iovec(const struct sk_buff *from, extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, struct iovec *iov); -extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, - int offset, - struct iovec *from, - int len); extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); diff --git a/trunk/include/linux/usb.h b/trunk/include/linux/usb.h index 0924cd9c30f6..5811c5da69f9 100644 --- a/trunk/include/linux/usb.h +++ b/trunk/include/linux/usb.h @@ -110,8 +110,6 @@ enum usb_interface_condition { * @sysfs_files_created: sysfs attributes exist * 
@needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend. - * @needs_binding: flag set when the driver should be re-probed or unbound - * following a reset or suspend operation it doesn't support. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. diff --git a/trunk/include/linux/usb/musb.h b/trunk/include/linux/usb/musb.h deleted file mode 100644 index 630962c04ca4..000000000000 --- a/trunk/include/linux/usb/musb.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * This is used to for host and peripheral modes of the driver for - * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. - * - * Board initialization should put one of these into dev->platform_data, - * probably on some platform_device named "musb_hdrc". It encapsulates - * key configuration differences between boards. - */ - -/* The USB role is defined by the connector used on the board, so long as - * standards are being followed. (Developer boards sometimes won't.) - */ -enum musb_mode { - MUSB_UNDEFINED = 0, - MUSB_HOST, /* A or Mini-A connector */ - MUSB_PERIPHERAL, /* B or Mini-B connector */ - MUSB_OTG /* Mini-AB connector */ -}; - -struct clk; - -struct musb_hdrc_eps_bits { - const char name[16]; - u8 bits; -}; - -struct musb_hdrc_config { - /* MUSB configuration-specific details */ - unsigned multipoint:1; /* multipoint device */ - unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ - unsigned soft_con:1; /* soft connect required */ - unsigned utm_16:1; /* utm data witdh is 16 bits */ - unsigned big_endian:1; /* true if CPU uses big-endian */ - unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ - unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ - unsigned high_iso_tx:1; /* Tx ep required for HB iso */ - unsigned high_iso_rx:1; /* Rx ep required for HD iso */ - unsigned dma:1; /* supports DMA */ - unsigned vendor_req:1; /* vendor registers required */ - - u8 num_eps; /* number of endpoints _with_ ep0 */ - u8 dma_channels; /* number of dma channels */ - u8 dyn_fifo_size; /* dynamic size in bytes */ - u8 vendor_ctrl; /* vendor control reg width */ - u8 vendor_stat; /* vendor status reg witdh */ - u8 dma_req_chan; /* bitmask for required dma channels */ - u8 ram_bits; /* ram address size */ - - struct musb_hdrc_eps_bits *eps_bits; -}; - -struct musb_hdrc_platform_data { - /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ - u8 mode; - - /* for clk_get() */ - const char *clock; - - /* (HOST or OTG) switch VBUS on/off */ - int (*set_vbus)(struct device *dev, int is_on); - - /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ - u8 power; - - /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ - u8 min_power; - - /* (HOST or OTG) msec/2 after VBUS on till power good */ - u8 potpgt; - - /* Power the device on or off */ - int (*set_power)(int state); - - /* Turn device clock on or off */ - int (*set_clock)(struct clk *clock, int is_on); - - /* MUSB configuration-specific details */ - struct musb_hdrc_config *config; -}; - - -/* TUSB 6010 support */ - -#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ -#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ -#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ - -#ifdef CONFIG_ARCH_OMAP2 - -extern int __init tusb6010_setup_interface( - struct musb_hdrc_platform_data *data, - unsigned ps_refclk, unsigned waitpin, - unsigned async_cs, unsigned sync_cs, - unsigned 
irq, unsigned dmachan); - -extern int tusb6010_platform_retime(unsigned is_refclk); - -#endif /* OMAP2 */ diff --git a/trunk/include/linux/usb/serial.h b/trunk/include/linux/usb/serial.h index 655341d0f534..09a3e6a7518f 100644 --- a/trunk/include/linux/usb/serial.h +++ b/trunk/include/linux/usb/serial.h @@ -17,8 +17,7 @@ #include #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ -#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ -#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ +#define SERIAL_TTY_MINORS 255 /* loads of devices :) */ /* The maximum number of ports one device can grab at once */ #define MAX_NUM_PORTS 8 diff --git a/trunk/include/net/addrconf.h b/trunk/include/net/addrconf.h index c216de528b08..06b28142b3ab 100644 --- a/trunk/include/net/addrconf.h +++ b/trunk/include/net/addrconf.h @@ -80,8 +80,7 @@ extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, struct net_device *dev, int strict); -extern int ipv6_dev_get_saddr(struct net *net, - struct net_device *dev, +extern int ipv6_dev_get_saddr(struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); diff --git a/trunk/include/net/ip6_route.h b/trunk/include/net/ip6_route.h index 5f53db7e4e57..2f8b3c06a101 100644 --- a/trunk/include/net/ip6_route.h +++ b/trunk/include/net/ip6_route.h @@ -38,6 +38,11 @@ struct route_info { #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 +#ifdef CONFIG_IPV6_MULTIPLE_TABLES +extern struct rt6_info *ip6_prohibit_entry; +extern struct rt6_info *ip6_blk_hole_entry; +#endif + extern void ip6_route_input(struct sk_buff *skb); extern struct dst_entry * ip6_route_output(struct net *net, @@ -107,13 +112,13 @@ struct rt6_rtnl_dump_arg { struct sk_buff *skb; struct netlink_callback *cb; - struct net *net; }; extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); extern void rt6_ifdown(struct net *net, struct net_device *dev); extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); +extern rwlock_t rt6_lock; /* * Store a destination cache entry in a socket diff --git a/trunk/include/net/ip_vs.h b/trunk/include/net/ip_vs.h index 7312c3dd309f..cbb59ebed4ae 100644 --- a/trunk/include/net/ip_vs.h +++ b/trunk/include/net/ip_vs.h @@ -140,24 +140,8 @@ struct ip_vs_seq { /* - * IPVS statistics objects + * IPVS statistics object */ -struct ip_vs_estimator { - struct list_head list; - - u64 last_inbytes; - u64 last_outbytes; - u32 last_conns; - u32 last_inpkts; - u32 last_outpkts; - - u32 cps; - u32 inpps; - u32 outpps; - u32 inbps; - u32 outbps; -}; - struct ip_vs_stats { __u32 conns; /* connections scheduled */ @@ -172,15 +156,7 @@ struct ip_vs_stats __u32 inbps; /* current in byte rate */ __u32 outbps; /* current out byte rate */ - /* - * Don't add anything before the lock, because we use memcpy() to copy - * the members before the lock to struct ip_vs_stats_user in - * ip_vs_ctl.c. 
- */ - spinlock_t lock; /* spin lock */ - - struct ip_vs_estimator est; /* estimator */ }; struct dst_entry; @@ -464,7 +440,7 @@ struct ip_vs_app */ extern const char *ip_vs_proto_name(unsigned proto); extern void ip_vs_init_hash_table(struct list_head *table, int rows); -#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) +#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0])) #define IP_VS_APP_TYPE_FTP 1 @@ -644,7 +620,7 @@ extern int sysctl_ip_vs_expire_quiescent_template; extern int sysctl_ip_vs_sync_threshold[2]; extern int sysctl_ip_vs_nat_icmp_send; extern struct ip_vs_stats ip_vs_stats; -extern const struct ctl_path net_vs_ctl_path[]; +extern struct ctl_path net_vs_ctl_path[]; extern struct ip_vs_service * ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport); @@ -683,7 +659,7 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp); /* * IPVS rate estimator prototypes (from ip_vs_est.c) */ -extern void ip_vs_new_estimator(struct ip_vs_stats *stats); +extern int ip_vs_new_estimator(struct ip_vs_stats *stats); extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); diff --git a/trunk/include/net/mac80211.h b/trunk/include/net/mac80211.h index ff137fd7714f..b397e4d984c7 100644 --- a/trunk/include/net/mac80211.h +++ b/trunk/include/net/mac80211.h @@ -708,7 +708,10 @@ enum ieee80211_tkip_key_type { * rely on the host system for such buffering. This option is used * to configure the IEEE 802.11 upper layer to buffer broadcast and * multicast frames when there are power saving stations so that - * the driver can fetch them with ieee80211_get_buffered_bc(). + * the driver can fetch them with ieee80211_get_buffered_bc(). Note + * that not setting this flag works properly only when the + * %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is also not set because + * otherwise the stack will not know when the DTIM beacon was sent. * * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE: * Hardware is not capable of short slot operation on the 2.4 GHz band. @@ -1096,8 +1099,10 @@ enum ieee80211_ampdu_mlme_action { * See the section "Frame filtering" for more information. * This callback must be implemented and atomic. * - * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit - * must be set or cleared for a given AID. Must be atomic. + * @set_tim: Set TIM bit. If the hardware/firmware takes care of beacon + * generation (that is, %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is set) + * mac80211 calls this function when a TIM bit must be set or cleared + * for a given AID. Must be atomic. 
* * @set_key: See the section "Hardware crypto acceleration" * This callback can sleep, and is only called between add_interface diff --git a/trunk/include/net/pkt_sched.h b/trunk/include/net/pkt_sched.h index 853fe83d9f37..6affcfaa123e 100644 --- a/trunk/include/net/pkt_sched.h +++ b/trunk/include/net/pkt_sched.h @@ -89,10 +89,7 @@ extern void __qdisc_run(struct Qdisc *q); static inline void qdisc_run(struct Qdisc *q) { - struct netdev_queue *txq = q->dev_queue; - - if (!netif_tx_queue_stopped(txq) && - !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) + if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) __qdisc_run(q); } diff --git a/trunk/include/net/sch_generic.h b/trunk/include/net/sch_generic.h index 84d25f2e6188..a7abfda3e447 100644 --- a/trunk/include/net/sch_generic.h +++ b/trunk/include/net/sch_generic.h @@ -27,7 +27,6 @@ enum qdisc_state_t { __QDISC_STATE_RUNNING, __QDISC_STATE_SCHED, - __QDISC_STATE_DEACTIVATED, }; struct qdisc_size_table { @@ -61,6 +60,7 @@ struct Qdisc struct gnet_stats_basic bstats; struct gnet_stats_queue qstats; struct gnet_stats_rate_est rate_est; + struct rcu_head q_rcu; int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q); diff --git a/trunk/net/bridge/br_device.c b/trunk/net/bridge/br_device.c index 4f52c3d50ebe..9b58d70b0e7d 100644 --- a/trunk/net/bridge/br_device.c +++ b/trunk/net/bridge/br_device.c @@ -148,16 +148,11 @@ static int br_set_tx_csum(struct net_device *dev, u32 data) } static struct ethtool_ops br_ethtool_ops = { - .get_drvinfo = br_getinfo, - .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = br_set_tx_csum, - .get_sg = ethtool_op_get_sg, - .set_sg = br_set_sg, - .get_tso = ethtool_op_get_tso, - .set_tso = br_set_tso, - .get_ufo = ethtool_op_get_ufo, - .get_flags = ethtool_op_get_flags, + .get_drvinfo = br_getinfo, + .get_link = ethtool_op_get_link, + .set_sg = br_set_sg, + .set_tx_csum = br_set_tx_csum, + .set_tso = br_set_tso, }; void br_dev_setup(struct net_device *dev) diff --git a/trunk/net/core/datagram.c b/trunk/net/core/datagram.c index 52f577a0f544..dd61dcad6019 100644 --- a/trunk/net/core/datagram.c +++ b/trunk/net/core/datagram.c @@ -339,93 +339,6 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, return -EFAULT; } -/** - * skb_copy_datagram_from_iovec - Copy a datagram from an iovec. - * @skb: buffer to copy - * @offset: offset in the buffer to start copying to - * @from: io vector to copy to - * @len: amount of data to copy to buffer from iovec - * - * Returns 0 or -EFAULT. - * Note: the iovec is modified during the copy. - */ -int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, - struct iovec *from, int len) -{ - int start = skb_headlen(skb); - int i, copy = start - offset; - - /* Copy header. */ - if (copy > 0) { - if (copy > len) - copy = len; - if (memcpy_fromiovec(skb->data + offset, from, copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - } - - /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - int end; - - WARN_ON(start > offset + len); - - end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end - offset) > 0) { - int err; - u8 *vaddr; - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - struct page *page = frag->page; - - if (copy > len) - copy = len; - vaddr = kmap(page); - err = memcpy_fromiovec(vaddr + frag->page_offset + - offset - start, from, copy); - kunmap(page); - if (err) - goto fault; - - if (!(len -= copy)) - return 0; - offset += copy; - } - start = end; - } - - if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; - - for (; list; list = list->next) { - int end; - - WARN_ON(start > offset + len); - - end = start + list->len; - if ((copy = end - offset) > 0) { - if (copy > len) - copy = len; - if (skb_copy_datagram_from_iovec(list, - offset - start, - from, copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - } - start = end; - } - } - if (!len) - return 0; - -fault: - return -EFAULT; -} -EXPORT_SYMBOL(skb_copy_datagram_from_iovec); - static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 __user *to, int len, __wsum *csump) diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index 8d133802372b..600bb23c4c2e 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -1339,23 +1339,19 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) } -static inline void __netif_reschedule(struct Qdisc *q) -{ - struct softnet_data *sd; - unsigned long flags; - - local_irq_save(flags); - sd = &__get_cpu_var(softnet_data); - q->next_sched = sd->output_queue; - sd->output_queue = q; - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); -} - void __netif_schedule(struct Qdisc *q) { - if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) - __netif_reschedule(q); + if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) { + struct softnet_data *sd; + unsigned long flags; + + local_irq_save(flags); + sd = &__get_cpu_var(softnet_data); + q->next_sched = sd->output_queue; + sd->output_queue = q; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); + } } EXPORT_SYMBOL(__netif_schedule); @@ -1804,13 +1800,9 @@ int dev_queue_xmit(struct sk_buff *skb) spin_lock(root_lock); - if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { - kfree_skb(skb); - rc = NET_XMIT_DROP; - } else { - rc = qdisc_enqueue_root(skb, q); - qdisc_run(q); - } + rc = qdisc_enqueue_root(skb, q); + qdisc_run(q); + spin_unlock(root_lock); goto out; @@ -1982,15 +1974,15 @@ static void net_tx_action(struct softirq_action *h) head = head->next_sched; + smp_mb__before_clear_bit(); + clear_bit(__QDISC_STATE_SCHED, &q->state); + root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { - smp_mb__before_clear_bit(); - clear_bit(__QDISC_STATE_SCHED, - &q->state); qdisc_run(q); spin_unlock(root_lock); } else { - __netif_reschedule(q); + __netif_schedule(q); } } } @@ -2092,8 +2084,7 @@ static int ing_filter(struct sk_buff *skb) q = rxq->qdisc; if (q != &noop_qdisc) { spin_lock(qdisc_lock(q)); - if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) - result = qdisc_enqueue_root(skb, q); + result = qdisc_enqueue_root(skb, q); spin_unlock(qdisc_lock(q)); } diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c index a756847e3814..526236453908 100644 --- a/trunk/net/core/pktgen.c +++ b/trunk/net/core/pktgen.c @@ -1961,8 +1961,6 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname) 
*/ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) { - int ntxq; - if (!pkt_dev->odev) { printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in " "setup_inject.\n"); @@ -1971,33 +1969,6 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) return; } - /* make sure that we don't pick a non-existing transmit queue */ - ntxq = pkt_dev->odev->real_num_tx_queues; - if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) { - printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU " - "disabled because CPU count (%d) exceeds number ", - num_online_cpus()); - printk(KERN_WARNING "pktgen: WARNING: of tx queues " - "(%d) on %s \n", ntxq, pkt_dev->odev->name); - pkt_dev->flags &= ~F_QUEUE_MAP_CPU; - } - if (ntxq <= pkt_dev->queue_map_min) { - printk(KERN_WARNING "pktgen: WARNING: Requested " - "queue_map_min (%d) exceeds number of tx\n", - pkt_dev->queue_map_min); - printk(KERN_WARNING "pktgen: WARNING: queues (%d) on " - "%s, resetting\n", ntxq, pkt_dev->odev->name); - pkt_dev->queue_map_min = ntxq - 1; - } - if (ntxq <= pkt_dev->queue_map_max) { - printk(KERN_WARNING "pktgen: WARNING: Requested " - "queue_map_max (%d) exceeds number of tx\n", - pkt_dev->queue_map_max); - printk(KERN_WARNING "pktgen: WARNING: queues (%d) on " - "%s, resetting\n", ntxq, pkt_dev->odev->name); - pkt_dev->queue_map_max = ntxq - 1; - } - /* Default to the interface's mac if not explicitly set. */ if (is_zero_ether_addr(pkt_dev->src_mac)) diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index ca1ccdf1ef76..84640172d65d 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -2256,7 +2256,14 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) segs = nskb; tail = nskb; - __copy_skb_header(nskb, skb); + nskb->dev = skb->dev; + skb_copy_queue_mapping(nskb, skb); + nskb->priority = skb->priority; + nskb->protocol = skb->protocol; + nskb->vlan_tci = skb->vlan_tci; + nskb->dst = dst_clone(skb->dst); + memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); + nskb->pkt_type = skb->pkt_type; nskb->mac_len = skb->mac_len; skb_reserve(nskb, headroom); @@ -2267,7 +2274,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) skb_copy_from_linear_data(skb, skb_put(nskb, doffset), doffset); if (!sg) { - nskb->ip_summed = CHECKSUM_NONE; nskb->csum = skb_copy_and_csum_bits(skb, offset, skb_put(nskb, len), len, 0); @@ -2277,6 +2283,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) frag = skb_shinfo(nskb)->frags; k = 0; + nskb->ip_summed = CHECKSUM_PARTIAL; + nskb->csum = skb->csum; skb_copy_from_linear_data_offset(skb, offset, skb_put(nskb, hsize), hsize); diff --git a/trunk/net/dccp/input.c b/trunk/net/dccp/input.c index 803933ab396d..df2f110df94a 100644 --- a/trunk/net/dccp/input.c +++ b/trunk/net/dccp/input.c @@ -411,6 +411,12 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, struct dccp_sock *dp = dccp_sk(sk); long tstamp = dccp_timestamp(); + /* Stop the REQUEST timer */ + inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); + WARN_ON(sk->sk_send_head == NULL); + __kfree_skb(sk->sk_send_head); + sk->sk_send_head = NULL; + if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, dp->dccps_awl, dp->dccps_awh)) { dccp_pr_debug("invalid ackno: S.AWL=%llu, " @@ -435,12 +441,6 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, DCCP_ACKVEC_STATE_RECEIVED)) goto out_invalid_packet; /* FIXME: change error code */ - /* Stop the REQUEST timer */ - inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); - WARN_ON(sk->sk_send_head == NULL); - 
kfree_skb(sk->sk_send_head); - sk->sk_send_head = NULL; - dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_update_gsr(sk, dp->dccps_isr); /* diff --git a/trunk/net/dccp/proto.c b/trunk/net/dccp/proto.c index 1ca3b26eed0f..b622d9744856 100644 --- a/trunk/net/dccp/proto.c +++ b/trunk/net/dccp/proto.c @@ -474,11 +474,6 @@ static int dccp_setsockopt_change(struct sock *sk, int type, if (copy_from_user(&opt, optval, sizeof(opt))) return -EFAULT; - /* - * rfc4340: 6.1. Change Options - */ - if (opt.dccpsf_len < 1) - return -EINVAL; val = kmalloc(opt.dccpsf_len, GFP_KERNEL); if (!val) diff --git a/trunk/net/ipv4/igmp.c b/trunk/net/ipv4/igmp.c index f70fac612596..6203ece53606 100644 --- a/trunk/net/ipv4/igmp.c +++ b/trunk/net/ipv4/igmp.c @@ -289,7 +289,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) struct rtable *rt; struct iphdr *pip; struct igmpv3_report *pig; - struct net *net = dev_net(dev); skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); if (skb == NULL) @@ -300,7 +299,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) .nl_u = { .ip4_u = { .daddr = IGMPV3_ALL_MCR } }, .proto = IPPROTO_IGMP }; - if (ip_route_output_key(net, &rt, &fl)) { + if (ip_route_output_key(&init_net, &rt, &fl)) { kfree_skb(skb); return NULL; } @@ -630,7 +629,6 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, struct igmphdr *ih; struct rtable *rt; struct net_device *dev = in_dev->dev; - struct net *net = dev_net(dev); __be32 group = pmc ? pmc->multiaddr : 0; __be32 dst; @@ -645,7 +643,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, struct flowi fl = { .oif = dev->ifindex, .nl_u = { .ip4_u = { .daddr = dst } }, .proto = IPPROTO_IGMP }; - if (ip_route_output_key(net, &rt, &fl)) + if (ip_route_output_key(&init_net, &rt, &fl)) return -1; } if (rt->rt_src == 0) { @@ -1198,6 +1196,9 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) ASSERT_RTNL(); + if (!net_eq(dev_net(in_dev->dev), &init_net)) + return; + for (im=in_dev->mc_list; im; im=im->next) { if (im->multiaddr == addr) { im->users++; @@ -1277,6 +1278,9 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) ASSERT_RTNL(); + if (!net_eq(dev_net(in_dev->dev), &init_net)) + return; + for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { if (i->multiaddr==addr) { if (--i->users == 0) { @@ -1304,6 +1308,9 @@ void ip_mc_down(struct in_device *in_dev) ASSERT_RTNL(); + if (!net_eq(dev_net(in_dev->dev), &init_net)) + return; + for (i=in_dev->mc_list; i; i=i->next) igmp_group_dropped(i); @@ -1324,6 +1331,9 @@ void ip_mc_init_dev(struct in_device *in_dev) { ASSERT_RTNL(); + if (!net_eq(dev_net(in_dev->dev), &init_net)) + return; + in_dev->mc_tomb = NULL; #ifdef CONFIG_IP_MULTICAST in_dev->mr_gq_running = 0; @@ -1347,6 +1357,9 @@ void ip_mc_up(struct in_device *in_dev) ASSERT_RTNL(); + if (!net_eq(dev_net(in_dev->dev), &init_net)) + return; + ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); for (i=in_dev->mc_list; i; i=i->next) @@ -1363,6 +1376,9 @@ void ip_mc_destroy_dev(struct in_device *in_dev) ASSERT_RTNL(); + if (!net_eq(dev_net(in_dev->dev), &init_net)) + return; + /* Deactivate timers */ ip_mc_down(in_dev); @@ -1379,7 +1395,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev) write_unlock_bh(&in_dev->mc_list_lock); } -static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) +static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr) { struct flowi fl = { .nl_u = { .ip4_u = { .daddr = 
imr->imr_multiaddr.s_addr } } }; @@ -1388,19 +1404,19 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) struct in_device *idev = NULL; if (imr->imr_ifindex) { - idev = inetdev_by_index(net, imr->imr_ifindex); + idev = inetdev_by_index(&init_net, imr->imr_ifindex); if (idev) __in_dev_put(idev); return idev; } if (imr->imr_address.s_addr) { - dev = ip_dev_find(net, imr->imr_address.s_addr); + dev = ip_dev_find(&init_net, imr->imr_address.s_addr); if (!dev) return NULL; dev_put(dev); } - if (!dev && !ip_route_output_key(net, &rt, &fl)) { + if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) { dev = rt->u.dst.dev; ip_rt_put(rt); } @@ -1738,16 +1754,18 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) struct ip_mc_socklist *iml=NULL, *i; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); - struct net *net = sock_net(sk); int ifindex; int count = 0; if (!ipv4_is_multicast(addr)) return -EINVAL; + if (!net_eq(sock_net(sk), &init_net)) + return -EPROTONOSUPPORT; + rtnl_lock(); - in_dev = ip_mc_find_dev(net, imr); + in_dev = ip_mc_find_dev(imr); if (!in_dev) { iml = NULL; @@ -1809,13 +1827,15 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml, **imlp; struct in_device *in_dev; - struct net *net = sock_net(sk); __be32 group = imr->imr_multiaddr.s_addr; u32 ifindex; int ret = -EADDRNOTAVAIL; + if (!net_eq(sock_net(sk), &init_net)) + return -EPROTONOSUPPORT; + rtnl_lock(); - in_dev = ip_mc_find_dev(net, imr); + in_dev = ip_mc_find_dev(imr); ifindex = imr->imr_ifindex; for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { if (iml->multi.imr_multiaddr.s_addr != group) @@ -1853,19 +1873,21 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct struct in_device *in_dev = NULL; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *psl; - struct net *net = sock_net(sk); int leavegroup = 0; int i, j, rv; if (!ipv4_is_multicast(addr)) return -EINVAL; + if (!net_eq(sock_net(sk), &init_net)) + return -EPROTONOSUPPORT; + rtnl_lock(); imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; imr.imr_address.s_addr = mreqs->imr_interface; imr.imr_ifindex = ifindex; - in_dev = ip_mc_find_dev(net, &imr); + in_dev = ip_mc_find_dev(&imr); if (!in_dev) { err = -ENODEV; @@ -1985,7 +2007,6 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *newpsl, *psl; - struct net *net = sock_net(sk); int leavegroup = 0; if (!ipv4_is_multicast(addr)) @@ -1994,12 +2015,15 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) msf->imsf_fmode != MCAST_EXCLUDE) return -EINVAL; + if (!net_eq(sock_net(sk), &init_net)) + return -EPROTONOSUPPORT; + rtnl_lock(); imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; imr.imr_address.s_addr = msf->imsf_interface; imr.imr_ifindex = ifindex; - in_dev = ip_mc_find_dev(net, &imr); + in_dev = ip_mc_find_dev(&imr); if (!in_dev) { err = -ENODEV; @@ -2070,17 +2094,19 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *psl; - struct net *net = sock_net(sk); if (!ipv4_is_multicast(addr)) return -EINVAL; + if (!net_eq(sock_net(sk), &init_net)) + return -EPROTONOSUPPORT; + rtnl_lock(); imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; imr.imr_address.s_addr = msf->imsf_interface; imr.imr_ifindex = 0; - in_dev = 
ip_mc_find_dev(net, &imr); + in_dev = ip_mc_find_dev(&imr); if (!in_dev) { err = -ENODEV; @@ -2137,6 +2163,9 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, if (!ipv4_is_multicast(addr)) return -EINVAL; + if (!net_eq(sock_net(sk), &init_net)) + return -EPROTONOSUPPORT; + rtnl_lock(); err = -EADDRNOTAVAIL; @@ -2217,17 +2246,19 @@ void ip_mc_drop_socket(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml; - struct net *net = sock_net(sk); if (inet->mc_list == NULL) return; + if (!net_eq(sock_net(sk), &init_net)) + return; + rtnl_lock(); while ((iml = inet->mc_list) != NULL) { struct in_device *in_dev; inet->mc_list = iml->next; - in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); + in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex); (void) ip_mc_leave_src(sk, iml, in_dev); if (in_dev != NULL) { ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); diff --git a/trunk/net/ipv4/ipvs/ip_vs_app.c b/trunk/net/ipv4/ipvs/ip_vs_app.c index 201b8ea3020d..1f1897a1a702 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_app.c +++ b/trunk/net/ipv4/ipvs/ip_vs_app.c @@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, } -int __init ip_vs_app_init(void) +int ip_vs_app_init(void) { /* we will replace it with proc_net_ipvs_create() soon */ proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops); diff --git a/trunk/net/ipv4/ipvs/ip_vs_conn.c b/trunk/net/ipv4/ipvs/ip_vs_conn.c index 44a6872dc245..f8bdae47a77f 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_conn.c +++ b/trunk/net/ipv4/ipvs/ip_vs_conn.c @@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void) } -int __init ip_vs_conn_init(void) +int ip_vs_conn_init(void) { int idx; diff --git a/trunk/net/ipv4/ipvs/ip_vs_ctl.c b/trunk/net/ipv4/ipvs/ip_vs_ctl.c index 6379705a8dcb..9a5ace0b4dd6 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_ctl.c +++ b/trunk/net/ipv4/ipvs/ip_vs_ctl.c @@ -683,22 +683,9 @@ static void ip_vs_zero_stats(struct ip_vs_stats *stats) { spin_lock_bh(&stats->lock); - - stats->conns = 0; - stats->inpkts = 0; - stats->outpkts = 0; - stats->inbytes = 0; - stats->outbytes = 0; - - stats->cps = 0; - stats->inpps = 0; - stats->outpps = 0; - stats->inbps = 0; - stats->outbps = 0; - - ip_vs_zero_estimator(stats); - + memset(stats, 0, (char *)&stats->lock - (char *)stats); spin_unlock_bh(&stats->lock); + ip_vs_zero_estimator(stats); } /* @@ -1602,7 +1589,7 @@ static struct ctl_table vs_vars[] = { { .ctl_name = 0 } }; -const struct ctl_path net_vs_ctl_path[] = { +struct ctl_path net_vs_ctl_path[] = { { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "ipv4", .ctl_name = NET_IPV4, }, { .procname = "vs", }, @@ -1797,9 +1784,7 @@ static const struct file_operations ip_vs_info_fops = { #endif -struct ip_vs_stats ip_vs_stats = { - .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock), -}; +struct ip_vs_stats ip_vs_stats; #ifdef CONFIG_PROC_FS static int ip_vs_stats_show(struct seq_file *seq, void *v) @@ -2321,7 +2306,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = { }; -int __init ip_vs_control_init(void) +int ip_vs_control_init(void) { int ret; int idx; @@ -2348,6 +2333,8 @@ int __init ip_vs_control_init(void) INIT_LIST_HEAD(&ip_vs_rtable[idx]); } + memset(&ip_vs_stats, 0, sizeof(ip_vs_stats)); + spin_lock_init(&ip_vs_stats.lock); ip_vs_new_estimator(&ip_vs_stats); /* Hook the defense timer */ diff --git a/trunk/net/ipv4/ipvs/ip_vs_dh.c b/trunk/net/ipv4/ipvs/ip_vs_dh.c index fa66824d264f..8afc1503ed20 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_dh.c +++ b/trunk/net/ipv4/ipvs/ip_vs_dh.c @@ 
-233,7 +233,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler = .name = "dh", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), .init_service = ip_vs_dh_init_svc, .done_service = ip_vs_dh_done_svc, .update_service = ip_vs_dh_update_svc, @@ -243,6 +242,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler = static int __init ip_vs_dh_init(void) { + INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_dh_scheduler); } diff --git a/trunk/net/ipv4/ipvs/ip_vs_est.c b/trunk/net/ipv4/ipvs/ip_vs_est.c index 5a20f93bd7f9..bc04eedd6dbb 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_est.c +++ b/trunk/net/ipv4/ipvs/ip_vs_est.c @@ -17,7 +17,6 @@ #include #include #include -#include #include @@ -45,11 +44,28 @@ */ -static void estimation_timer(unsigned long arg); +struct ip_vs_estimator +{ + struct ip_vs_estimator *next; + struct ip_vs_stats *stats; + + u32 last_conns; + u32 last_inpkts; + u32 last_outpkts; + u64 last_inbytes; + u64 last_outbytes; + + u32 cps; + u32 inpps; + u32 outpps; + u32 inbps; + u32 outbps; +}; + -static LIST_HEAD(est_list); -static DEFINE_SPINLOCK(est_lock); -static DEFINE_TIMER(est_timer, estimation_timer, 0, 0); +static struct ip_vs_estimator *est_list = NULL; +static DEFINE_RWLOCK(est_lock); +static struct timer_list est_timer; static void estimation_timer(unsigned long arg) { @@ -60,9 +76,9 @@ static void estimation_timer(unsigned long arg) u64 n_inbytes, n_outbytes; u32 rate; - spin_lock(&est_lock); - list_for_each_entry(e, &est_list, list) { - s = container_of(e, struct ip_vs_stats, est); + read_lock(&est_lock); + for (e = est_list; e; e = e->next) { + s = e->stats; spin_lock(&s->lock); n_conns = s->conns; @@ -98,16 +114,19 @@ static void estimation_timer(unsigned long arg) s->outbps = (e->outbps+0xF)>>5; spin_unlock(&s->lock); } - spin_unlock(&est_lock); + read_unlock(&est_lock); mod_timer(&est_timer, jiffies + 2*HZ); } -void ip_vs_new_estimator(struct ip_vs_stats *stats) +int ip_vs_new_estimator(struct ip_vs_stats *stats) { - struct ip_vs_estimator *est = &stats->est; + struct ip_vs_estimator *est; - INIT_LIST_HEAD(&est->list); + est = kzalloc(sizeof(*est), GFP_KERNEL); + if (est == NULL) + return -ENOMEM; + est->stats = stats; est->last_conns = stats->conns; est->cps = stats->cps<<10; @@ -123,40 +142,59 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats) est->last_outbytes = stats->outbytes; est->outbps = stats->outbps<<5; - spin_lock_bh(&est_lock); - if (list_empty(&est_list)) - mod_timer(&est_timer, jiffies + 2 * HZ); - list_add(&est->list, &est_list); - spin_unlock_bh(&est_lock); + write_lock_bh(&est_lock); + est->next = est_list; + if (est->next == NULL) { + setup_timer(&est_timer, estimation_timer, 0); + est_timer.expires = jiffies + 2*HZ; + add_timer(&est_timer); + } + est_list = est; + write_unlock_bh(&est_lock); + return 0; } void ip_vs_kill_estimator(struct ip_vs_stats *stats) { - struct ip_vs_estimator *est = &stats->est; - - spin_lock_bh(&est_lock); - list_del(&est->list); - while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) { - spin_unlock_bh(&est_lock); - cpu_relax(); - spin_lock_bh(&est_lock); + struct ip_vs_estimator *est, **pest; + int killed = 0; + + write_lock_bh(&est_lock); + pest = &est_list; + while ((est=*pest) != NULL) { + if (est->stats != stats) { + pest = &est->next; + continue; + } + *pest = est->next; + kfree(est); + killed++; } - spin_unlock_bh(&est_lock); + if (killed && est_list == NULL) + del_timer_sync(&est_timer); + 
write_unlock_bh(&est_lock); } void ip_vs_zero_estimator(struct ip_vs_stats *stats) { - struct ip_vs_estimator *est = &stats->est; - - /* set counters zero, caller must hold the stats->lock lock */ - est->last_inbytes = 0; - est->last_outbytes = 0; - est->last_conns = 0; - est->last_inpkts = 0; - est->last_outpkts = 0; - est->cps = 0; - est->inpps = 0; - est->outpps = 0; - est->inbps = 0; - est->outbps = 0; + struct ip_vs_estimator *e; + + write_lock_bh(&est_lock); + for (e = est_list; e; e = e->next) { + if (e->stats != stats) + continue; + + /* set counters zero */ + e->last_conns = 0; + e->last_inpkts = 0; + e->last_outpkts = 0; + e->last_inbytes = 0; + e->last_outbytes = 0; + e->cps = 0; + e->inpps = 0; + e->outpps = 0; + e->inbps = 0; + e->outbps = 0; + } + write_unlock_bh(&est_lock); } diff --git a/trunk/net/ipv4/ipvs/ip_vs_lblc.c b/trunk/net/ipv4/ipvs/ip_vs_lblc.c index 7a6a319f544a..0efa3db4b180 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_lblc.c +++ b/trunk/net/ipv4/ipvs/ip_vs_lblc.c @@ -539,7 +539,6 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler = .name = "lblc", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), .init_service = ip_vs_lblc_init_svc, .done_service = ip_vs_lblc_done_svc, .update_service = ip_vs_lblc_update_svc, @@ -551,6 +550,7 @@ static int __init ip_vs_lblc_init(void) { int ret; + INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list); sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table); ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler); if (ret) diff --git a/trunk/net/ipv4/ipvs/ip_vs_lblcr.c b/trunk/net/ipv4/ipvs/ip_vs_lblcr.c index c234e73968a6..8e3bbeb45138 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/trunk/net/ipv4/ipvs/ip_vs_lblcr.c @@ -728,7 +728,6 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler = .name = "lblcr", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), .init_service = ip_vs_lblcr_init_svc, .done_service = ip_vs_lblcr_done_svc, .update_service = ip_vs_lblcr_update_svc, @@ -740,6 +739,7 @@ static int __init ip_vs_lblcr_init(void) { int ret; + INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list); sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table); ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); if (ret) diff --git a/trunk/net/ipv4/ipvs/ip_vs_lc.c b/trunk/net/ipv4/ipvs/ip_vs_lc.c index ebcdbf75ac65..ac9f08e065d5 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_lc.c +++ b/trunk/net/ipv4/ipvs/ip_vs_lc.c @@ -98,7 +98,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = { .name = "lc", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), .init_service = ip_vs_lc_init_svc, .done_service = ip_vs_lc_done_svc, .update_service = ip_vs_lc_update_svc, @@ -108,6 +107,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = { static int __init ip_vs_lc_init(void) { + INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ; } diff --git a/trunk/net/ipv4/ipvs/ip_vs_nq.c b/trunk/net/ipv4/ipvs/ip_vs_nq.c index 92f3a6770031..a46bf258d420 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_nq.c +++ b/trunk/net/ipv4/ipvs/ip_vs_nq.c @@ -136,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler = .name = "nq", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), .init_service = ip_vs_nq_init_svc, .done_service = ip_vs_nq_done_svc, .update_service = ip_vs_nq_update_svc, @@ -146,6 +145,7 @@ 
static struct ip_vs_scheduler ip_vs_nq_scheduler = static int __init ip_vs_nq_init(void) { + INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_nq_scheduler); } diff --git a/trunk/net/ipv4/ipvs/ip_vs_proto.c b/trunk/net/ipv4/ipvs/ip_vs_proto.c index 6099a88fc200..876714f23d65 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_proto.c +++ b/trunk/net/ipv4/ipvs/ip_vs_proto.c @@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE]; /* * register an ipvs protocol */ -static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) +static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp) { unsigned hash = IP_VS_PROTO_HASH(pp->protocol); @@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, } -int __init ip_vs_protocol_init(void) +int ip_vs_protocol_init(void) { char protocols[64]; #define REGISTER_PROTOCOL(p) \ diff --git a/trunk/net/ipv4/ipvs/ip_vs_rr.c b/trunk/net/ipv4/ipvs/ip_vs_rr.c index 358110d17e59..c8db12d39e61 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_rr.c +++ b/trunk/net/ipv4/ipvs/ip_vs_rr.c @@ -94,7 +94,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = { .name = "rr", /* name */ .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), .init_service = ip_vs_rr_init_svc, .done_service = ip_vs_rr_done_svc, .update_service = ip_vs_rr_update_svc, @@ -103,6 +102,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = { static int __init ip_vs_rr_init(void) { + INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_rr_scheduler); } diff --git a/trunk/net/ipv4/ipvs/ip_vs_sched.c b/trunk/net/ipv4/ipvs/ip_vs_sched.c index a46ad9e35016..b64767309855 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_sched.c +++ b/trunk/net/ipv4/ipvs/ip_vs_sched.c @@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) write_lock_bh(&__ip_vs_sched_lock); - if (!list_empty(&scheduler->n_list)) { + if (scheduler->n_list.next != &scheduler->n_list) { write_unlock_bh(&__ip_vs_sched_lock); ip_vs_use_count_dec(); IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler " @@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) } write_lock_bh(&__ip_vs_sched_lock); - if (list_empty(&scheduler->n_list)) { + if (scheduler->n_list.next == &scheduler->n_list) { write_unlock_bh(&__ip_vs_sched_lock); IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler " "is not in the list. 
failed\n", scheduler->name); diff --git a/trunk/net/ipv4/ipvs/ip_vs_sed.c b/trunk/net/ipv4/ipvs/ip_vs_sed.c index 77663d84cbd1..2a7d31358181 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_sed.c +++ b/trunk/net/ipv4/ipvs/ip_vs_sed.c @@ -138,7 +138,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler = .name = "sed", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), .init_service = ip_vs_sed_init_svc, .done_service = ip_vs_sed_done_svc, .update_service = ip_vs_sed_update_svc, @@ -148,6 +147,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler = static int __init ip_vs_sed_init(void) { + INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_sed_scheduler); } diff --git a/trunk/net/ipv4/ipvs/ip_vs_sh.c b/trunk/net/ipv4/ipvs/ip_vs_sh.c index 7b979e228056..b8fdfac65001 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_sh.c +++ b/trunk/net/ipv4/ipvs/ip_vs_sh.c @@ -230,7 +230,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler = .name = "sh", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), .init_service = ip_vs_sh_init_svc, .done_service = ip_vs_sh_done_svc, .update_service = ip_vs_sh_update_svc, @@ -240,6 +239,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler = static int __init ip_vs_sh_init(void) { + INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_sh_scheduler); } diff --git a/trunk/net/ipv4/ipvs/ip_vs_sync.c b/trunk/net/ipv4/ipvs/ip_vs_sync.c index a652da2c3200..45e9bd96c286 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_sync.c +++ b/trunk/net/ipv4/ipvs/ip_vs_sync.c @@ -904,9 +904,9 @@ int stop_sync_thread(int state) * progress of stopping the master sync daemon. */ - spin_lock_bh(&ip_vs_sync_lock); + spin_lock(&ip_vs_sync_lock); ip_vs_sync_state &= ~IP_VS_STATE_MASTER; - spin_unlock_bh(&ip_vs_sync_lock); + spin_unlock(&ip_vs_sync_lock); kthread_stop(sync_master_thread); sync_master_thread = NULL; } else if (state == IP_VS_STATE_BACKUP) { diff --git a/trunk/net/ipv4/ipvs/ip_vs_wlc.c b/trunk/net/ipv4/ipvs/ip_vs_wlc.c index 9b0ef86bb1f7..772c3cb4eca1 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_wlc.c +++ b/trunk/net/ipv4/ipvs/ip_vs_wlc.c @@ -126,7 +126,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler = .name = "wlc", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), .init_service = ip_vs_wlc_init_svc, .done_service = ip_vs_wlc_done_svc, .update_service = ip_vs_wlc_update_svc, @@ -136,6 +135,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler = static int __init ip_vs_wlc_init(void) { + INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_wlc_scheduler); } diff --git a/trunk/net/ipv4/ipvs/ip_vs_wrr.c b/trunk/net/ipv4/ipvs/ip_vs_wrr.c index 0d86a79b87b5..1d6932d7dc97 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_wrr.c +++ b/trunk/net/ipv4/ipvs/ip_vs_wrr.c @@ -212,7 +212,6 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = { .name = "wrr", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, - .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), .init_service = ip_vs_wrr_init_svc, .done_service = ip_vs_wrr_done_svc, .update_service = ip_vs_wrr_update_svc, @@ -221,6 +220,7 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = { static int __init ip_vs_wrr_init(void) { + INIT_LIST_HEAD(&ip_vs_wrr_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ; } diff --git a/trunk/net/ipv4/netfilter/ipt_addrtype.c 
b/trunk/net/ipv4/netfilter/ipt_addrtype.c index 462a22c97877..49587a497229 100644 --- a/trunk/net/ipv4/netfilter/ipt_addrtype.c +++ b/trunk/net/ipv4/netfilter/ipt_addrtype.c @@ -70,7 +70,7 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct net_device *in, (info->flags & IPT_ADDRTYPE_INVERT_SOURCE); if (ret && info->dest) ret &= match_type(dev, iph->daddr, info->dest) ^ - !!(info->flags & IPT_ADDRTYPE_INVERT_DEST); + (info->flags & IPT_ADDRTYPE_INVERT_DEST); return ret; } diff --git a/trunk/net/ipv4/netfilter/nf_nat_proto_common.c b/trunk/net/ipv4/netfilter/nf_nat_proto_common.c index 6c4f11f51446..91537f11273f 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_proto_common.c +++ b/trunk/net/ipv4/netfilter/nf_nat_proto_common.c @@ -73,13 +73,9 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, range_size = ntohs(range->max.all) - min + 1; } + off = *rover; if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) - off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip, - maniptype == IP_NAT_MANIP_SRC - ? tuple->dst.u.all - : tuple->src.u.all); - else - off = *rover; + off = net_random(); for (i = 0; i < range_size; i++, off++) { *portptr = htons(min + off % range_size); diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index cca921ea8550..16fc6f454a31 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -2914,68 +2914,6 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table, return 0; } -static void rt_secret_reschedule(int old) -{ - struct net *net; - int new = ip_rt_secret_interval; - int diff = new - old; - - if (!diff) - return; - - rtnl_lock(); - for_each_net(net) { - int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); - - if (!new) - continue; - - if (deleted) { - long time = net->ipv4.rt_secret_timer.expires - jiffies; - - if (time <= 0 || (time += diff) <= 0) - time = 0; - - net->ipv4.rt_secret_timer.expires = time; - } else - net->ipv4.rt_secret_timer.expires = new; - - net->ipv4.rt_secret_timer.expires += jiffies; - add_timer(&net->ipv4.rt_secret_timer); - } - rtnl_unlock(); -} - -static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write, - struct file *filp, - void __user *buffer, size_t *lenp, - loff_t *ppos) -{ - int old = ip_rt_secret_interval; - int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos); - - rt_secret_reschedule(old); - - return ret; -} - -static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table, - int __user *name, - int nlen, - void __user *oldval, - size_t __user *oldlenp, - void __user *newval, - size_t newlen) -{ - int old = ip_rt_secret_interval; - int ret = sysctl_jiffies(table, name, nlen, oldval, oldlenp, newval, - newlen); - - rt_secret_reschedule(old); - - return ret; -} - static ctl_table ipv4_route_table[] = { { .ctl_name = NET_IPV4_ROUTE_GC_THRESH, @@ -3110,8 +3048,8 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_secret_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &ipv4_sysctl_rt_secret_interval, - .strategy = &ipv4_sysctl_rt_secret_interval_strategy, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, }, { .ctl_name = 0 } }; @@ -3188,12 +3126,10 @@ static __net_init int rt_secret_timer_init(struct net *net) net->ipv4.rt_secret_timer.data = (unsigned long)net; init_timer_deferrable(&net->ipv4.rt_secret_timer); - if (ip_rt_secret_interval) { - net->ipv4.rt_secret_timer.expires = - jiffies + net_random() % ip_rt_secret_interval + - ip_rt_secret_interval; - add_timer(&net->ipv4.rt_secret_timer); - } + 
net->ipv4.rt_secret_timer.expires = + jiffies + net_random() % ip_rt_secret_interval + + ip_rt_secret_interval; + add_timer(&net->ipv4.rt_secret_timer); return 0; } diff --git a/trunk/net/ipv4/udp.c b/trunk/net/ipv4/udp.c index 8e42fbbd5761..383d17359d01 100644 --- a/trunk/net/ipv4/udp.c +++ b/trunk/net/ipv4/udp.c @@ -989,9 +989,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) up->encap_rcv != NULL) { int ret; - bh_unlock_sock(sk); ret = (*up->encap_rcv)(sk, skb); - bh_lock_sock(sk); if (ret <= 0) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INDATAGRAMS, @@ -1094,7 +1092,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, if (skb1) { int ret = 0; - bh_lock_sock(sk); + bh_lock_sock_nested(sk); if (!sock_owned_by_user(sk)) ret = udp_queue_rcv_skb(sk, skb1); else @@ -1196,7 +1194,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], if (sk != NULL) { int ret = 0; - bh_lock_sock(sk); + bh_lock_sock_nested(sk); if (!sock_owned_by_user(sk)) ret = udp_queue_rcv_skb(sk, skb); else diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index e2d3b7580b76..a7842c54f58a 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -1106,12 +1106,13 @@ static int ipv6_get_saddr_eval(struct net *net, return ret; } -int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, +int ipv6_dev_get_saddr(struct net_device *dst_dev, const struct in6_addr *daddr, unsigned int prefs, struct in6_addr *saddr) { struct ipv6_saddr_score scores[2], *score = &scores[0], *hiscore = &scores[1]; + struct net *net = dev_net(dst_dev); struct ipv6_saddr_dst dst; struct net_device *dev; int dst_type; diff --git a/trunk/net/ipv6/fib6_rules.c b/trunk/net/ipv6/fib6_rules.c index f5de3f9dc692..8d05527524e3 100644 --- a/trunk/net/ipv6/fib6_rules.c +++ b/trunk/net/ipv6/fib6_rules.c @@ -93,8 +93,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, if (flags & RT6_LOOKUP_F_SRCPREF_COA) srcprefs |= IPV6_PREFER_SRC_COA; - if (ipv6_dev_get_saddr(net, - ip6_dst_idev(&rt->u.dst)->dev, + if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev, &flp->fl6_dst, srcprefs, &saddr)) goto again; diff --git a/trunk/net/ipv6/ip6_fib.c b/trunk/net/ipv6/ip6_fib.c index 29c7c99e69f7..52dddc25d3e6 100644 --- a/trunk/net/ipv6/ip6_fib.c +++ b/trunk/net/ipv6/ip6_fib.c @@ -378,7 +378,6 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) arg.skb = skb; arg.cb = cb; - arg.net = net; w->args = &arg; for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { diff --git a/trunk/net/ipv6/ip6_output.c b/trunk/net/ipv6/ip6_output.c index 0e844c2736a7..a4402de425d9 100644 --- a/trunk/net/ipv6/ip6_output.c +++ b/trunk/net/ipv6/ip6_output.c @@ -934,7 +934,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, goto out_err_release; if (ipv6_addr_any(&fl->fl6_src)) { - err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev, + err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev, &fl->fl6_dst, sk ? inet6_sk(sk)->srcprefs : 0, &fl->fl6_src); diff --git a/trunk/net/ipv6/ipv6_sockglue.c b/trunk/net/ipv6/ipv6_sockglue.c index 4e5eac301f91..741cfcd96f88 100644 --- a/trunk/net/ipv6/ipv6_sockglue.c +++ b/trunk/net/ipv6/ipv6_sockglue.c @@ -911,7 +911,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, } else { if (np->rxopt.bits.rxinfo) { struct in6_pktinfo src_info; - src_info.ipi6_ifindex = np->mcast_oif ? 
np->mcast_oif : sk->sk_bound_dev_if; + src_info.ipi6_ifindex = np->mcast_oif; ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr); put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } @@ -921,7 +921,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, } if (np->rxopt.bits.rxoinfo) { struct in6_pktinfo src_info; - src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if; + src_info.ipi6_ifindex = np->mcast_oif; ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr); put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } diff --git a/trunk/net/ipv6/ndisc.c b/trunk/net/ipv6/ndisc.c index f1c62ba0f56b..beb48e3f038a 100644 --- a/trunk/net/ipv6/ndisc.c +++ b/trunk/net/ipv6/ndisc.c @@ -549,7 +549,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, override = 0; in6_ifa_put(ifp); } else { - if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, + if (ipv6_dev_get_saddr(dev, daddr, inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs, &tmpaddr)) return; diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index 9af6115f0f50..5a3e87e4b18f 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -2106,8 +2106,7 @@ static inline size_t rt6_nlmsg_size(void) + nla_total_size(sizeof(struct rta_cacheinfo)); } -static int rt6_fill_node(struct net *net, - struct sk_buff *skb, struct rt6_info *rt, +static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, int iif, int type, u32 pid, u32 seq, int prefix, int nowait, unsigned int flags) @@ -2188,9 +2187,8 @@ static int rt6_fill_node(struct net *net, #endif NLA_PUT_U32(skb, RTA_IIF, iif); } else if (dst) { - struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst); struct in6_addr saddr_buf; - if (ipv6_dev_get_saddr(net, idev ? 
idev->dev : NULL, + if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev, dst, 0, &saddr_buf) == 0) NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); } @@ -2235,8 +2233,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) } else prefix = 0; - return rt6_fill_node(arg->net, - arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, + return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq, prefix, 0, NLM_F_MULTI); } @@ -2302,7 +2299,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); skb->dst = &rt->u.dst; - err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, + err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, RTM_NEWROUTE, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 0, 0, 0); if (err < 0) { @@ -2329,7 +2326,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) if (skb == NULL) goto errout; - err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, + err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, info->pid, seq, 0, 0, 0); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c index a6aecf76a71b..d1477b350f76 100644 --- a/trunk/net/ipv6/udp.c +++ b/trunk/net/ipv6/udp.c @@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, uh->source, saddr, dif))) { struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC); if (buff) { - bh_lock_sock(sk2); + bh_lock_sock_nested(sk2); if (!sock_owned_by_user(sk2)) udpv6_queue_rcv_skb(sk2, buff); else @@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, bh_unlock_sock(sk2); } } - bh_lock_sock(sk); + bh_lock_sock_nested(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb); else @@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], /* deliver */ - bh_lock_sock(sk); + bh_lock_sock_nested(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb); else diff --git a/trunk/net/ipv6/xfrm6_policy.c b/trunk/net/ipv6/xfrm6_policy.c index 08e4cbbe3f04..8f1e0543b3c4 100644 --- a/trunk/net/ipv6/xfrm6_policy.c +++ b/trunk/net/ipv6/xfrm6_policy.c @@ -52,14 +52,12 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr, static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr) { struct dst_entry *dst; - struct net_device *dev; dst = xfrm6_dst_lookup(0, NULL, daddr); if (IS_ERR(dst)) return -EHOSTUNREACH; - dev = ip6_dst_idev(dst)->dev; - ipv6_dev_get_saddr(dev_net(dev), dev, + ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev, (struct in6_addr *)&daddr->a6, 0, (struct in6_addr *)&saddr->a6); dst_release(dst); diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index 1e97fb9fb34b..e1d11c9b6729 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -2103,8 +2103,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); return; } - /* update new sta with its last rx activity */ - sta->last_rx = jiffies; } /* diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index a8752031adcb..105a616c5c78 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -968,7 +968,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[]) /* need to zero data of old helper */ memset(&help->help, 0, sizeof(help->help)); } else { 
- help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); + help = nf_ct_helper_ext_add(ct, GFP_KERNEL); if (help == NULL) return -ENOMEM; } @@ -1136,33 +1136,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[], ct->timeout.expires = jiffies + ct->timeout.expires * HZ; ct->status |= IPS_CONFIRMED; - rcu_read_lock(); - helper = __nf_ct_helper_find(rtuple); - if (helper) { - help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); - if (help == NULL) { - rcu_read_unlock(); - err = -ENOMEM; - goto err; - } - /* not in hash table yet so not strictly necessary */ - rcu_assign_pointer(help->helper, helper); - } - if (cda[CTA_STATUS]) { err = ctnetlink_change_status(ct, cda); - if (err < 0) { - rcu_read_unlock(); + if (err < 0) goto err; - } } if (cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); - if (err < 0) { - rcu_read_unlock(); + if (err < 0) goto err; - } } nf_ct_acct_ext_add(ct, GFP_KERNEL); @@ -1172,6 +1155,19 @@ ctnetlink_create_conntrack(struct nlattr *cda[], ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); #endif + rcu_read_lock(); + helper = __nf_ct_helper_find(rtuple); + if (helper) { + help = nf_ct_helper_ext_add(ct, GFP_KERNEL); + if (help == NULL) { + rcu_read_unlock(); + err = -ENOMEM; + goto err; + } + /* not in hash table yet so not strictly necessary */ + rcu_assign_pointer(help->helper, helper); + } + /* setup master conntrack: this is a confirmed expectation */ if (master_ct) { __set_bit(IPS_EXPECTED_BIT, &ct->status); diff --git a/trunk/net/rfkill/rfkill.c b/trunk/net/rfkill/rfkill.c index 35a9994e2339..d2d45655cd1a 100644 --- a/trunk/net/rfkill/rfkill.c +++ b/trunk/net/rfkill/rfkill.c @@ -150,8 +150,6 @@ static void update_rfkill_state(struct rfkill *rfkill) * calls and handling all the red tape such as issuing notifications * if the call is successful. * - * Suspended devices are not touched at all, and -EAGAIN is returned. - * * Note that the @force parameter cannot override a (possibly cached) * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of * RFKILL_STATE_HARD_BLOCKED implements either get_state() or @@ -170,9 +168,6 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, int retval = 0; enum rfkill_state oldstate, newstate; - if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) - return -EBUSY; - oldstate = rfkill->state; if (rfkill->get_state && !force && @@ -219,7 +214,7 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, * * This function toggles the state of all switches of given type, * unless a specific switch is claimed by userspace (in which case, - * that switch is left alone) or suspended. + * that switch is left alone). */ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) { @@ -244,8 +239,8 @@ EXPORT_SYMBOL(rfkill_switch_all); /** * rfkill_epo - emergency power off all transmitters * - * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, - * ignoring everything in its path but rfkill_mutex and rfkill->mutex. + * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring + * everything in its path but rfkill_mutex and rfkill->mutex. 
*/ void rfkill_epo(void) { @@ -463,14 +458,13 @@ static int rfkill_resume(struct device *dev) if (dev->power.power_state.event != PM_EVENT_ON) { mutex_lock(&rfkill->mutex); - dev->power.power_state.event = PM_EVENT_ON; - /* restore radio state AND notify everybody */ rfkill_toggle_radio(rfkill, rfkill->state, 1); mutex_unlock(&rfkill->mutex); } + dev->power.power_state = PMSG_ON; return 0; } #else diff --git a/trunk/net/rxrpc/ar-accept.c b/trunk/net/rxrpc/ar-accept.c index 77228f28fa36..bdfb77417794 100644 --- a/trunk/net/rxrpc/ar-accept.c +++ b/trunk/net/rxrpc/ar-accept.c @@ -100,7 +100,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, trans = rxrpc_get_transport(local, peer, GFP_NOIO); rxrpc_put_peer(peer); - if (IS_ERR(trans)) { + if (!trans) { _debug("no trans"); ret = -EBUSY; goto error; diff --git a/trunk/net/sched/act_api.c b/trunk/net/sched/act_api.c index 9974b3f04f05..26c7e1f9a350 100644 --- a/trunk/net/sched/act_api.c +++ b/trunk/net/sched/act_api.c @@ -751,7 +751,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) struct nlattr *tb[TCA_ACT_MAX+1]; struct nlattr *kind; struct tc_action *a = create_a(0); - int err = -ENOMEM; + int err = -EINVAL; if (a == NULL) { printk("tca_action_flush: couldnt create tc_action\n"); @@ -762,7 +762,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) if (!skb) { printk("tca_action_flush: failed skb alloc\n"); kfree(a); - return err; + return -ENOBUFS; } b = skb_tail_pointer(skb); @@ -790,8 +790,6 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); if (err < 0) goto nla_put_failure; - if (err == 0) - goto noflush_out; nla_nest_end(skb, nest); @@ -809,7 +807,6 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) nlmsg_failure: module_put(a->ops->owner); err_out: -noflush_out: kfree_skb(skb); kfree(a); return err; @@ -827,10 +824,8 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) return ret; if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { - if (tb[1] != NULL) - return tca_action_flush(tb[1], n, pid); - else - return -EINVAL; + if (tb[0] != NULL && tb[1] == NULL) + return tca_action_flush(tb[0], n, pid); } for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { diff --git a/trunk/net/sched/cls_api.c b/trunk/net/sched/cls_api.c index 5cafdd4c8018..d2b6f54a6261 100644 --- a/trunk/net/sched/cls_api.c +++ b/trunk/net/sched/cls_api.c @@ -280,7 +280,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { spin_lock_bh(root_lock); *back = tp->next; - spin_unlock_bh(root_lock); + spin_lock_bh(root_lock); tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); tcf_destroy(tp); diff --git a/trunk/net/sched/sch_api.c b/trunk/net/sched/sch_api.c index ef0efeca6352..ba1d121f3127 100644 --- a/trunk/net/sched/sch_api.c +++ b/trunk/net/sched/sch_api.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -184,21 +183,6 @@ EXPORT_SYMBOL(unregister_qdisc); (root qdisc, all its children, children of children etc.) 
*/ -struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) -{ - struct Qdisc *q; - - if (!(root->flags & TCQ_F_BUILTIN) && - root->handle == handle) - return root; - - list_for_each_entry(q, &root->list, list) { - if (q->handle == handle) - return q; - } - return NULL; -} - struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) { unsigned int i; @@ -207,11 +191,16 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) struct netdev_queue *txq = netdev_get_tx_queue(dev, i); struct Qdisc *q, *txq_root = txq->qdisc_sleeping; - q = qdisc_match_from_root(txq_root, handle); - if (q) - return q; + if (!(txq_root->flags & TCQ_F_BUILTIN) && + txq_root->handle == handle) + return txq_root; + + list_for_each_entry(q, &txq_root->list, list) { + if (q->handle == handle) + return q; + } } - return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); + return NULL; } static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) @@ -427,7 +416,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) wd->qdisc->flags &= ~TCQ_F_THROTTLED; smp_wmb(); - __netif_schedule(qdisc_root(wd->qdisc)); + __netif_schedule(wd->qdisc); return HRTIMER_NORESTART; } @@ -638,8 +627,11 @@ static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid if (new || old) qdisc_notify(skb, n, clid, old, new); - if (old) + if (old) { + spin_lock_bh(&old->q.lock); qdisc_destroy(old); + spin_unlock_bh(&old->q.lock); + } } /* Graft qdisc "new" to class "classid" of qdisc "parent" or @@ -705,10 +697,6 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, return err; } -/* lockdep annotation is needed for ingress; egress gets it only for name */ -static struct lock_class_key qdisc_tx_lock; -static struct lock_class_key qdisc_rx_lock; - /* Allocate and initialize new qdisc. 
@@ -769,7 +757,6 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, if (handle == TC_H_INGRESS) { sch->flags |= TCQ_F_INGRESS; handle = TC_H_MAKE(TC_H_INGRESS, 0); - lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock); } else { if (handle == 0) { handle = qdisc_alloc_handle(dev); @@ -777,7 +764,6 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, if (handle == 0) goto err_out3; } - lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); } sch->handle = handle; @@ -922,7 +908,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) return -ENOENT; q = qdisc_leaf(p, clid); } else { /* ingress */ - q = dev->rx_queue.qdisc_sleeping; + q = dev->rx_queue.qdisc; } } else { struct netdev_queue *dev_queue; @@ -992,7 +978,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) return -ENOENT; q = qdisc_leaf(p, clid); } else { /*ingress */ - q = dev->rx_queue.qdisc_sleeping; + q = dev->rx_queue.qdisc; } } else { struct netdev_queue *dev_queue; @@ -1088,13 +1074,20 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) } graft: - err = qdisc_graft(dev, p, skb, n, clid, q, NULL); - if (err) { - if (q) - qdisc_destroy(q); - return err; + if (1) { + spinlock_t *root_lock; + + err = qdisc_graft(dev, p, skb, n, clid, q, NULL); + if (err) { + if (q) { + root_lock = qdisc_root_lock(q); + spin_lock_bh(root_lock); + qdisc_destroy(q); + spin_unlock_bh(root_lock); + } + return err; + } } - return 0; } @@ -1536,11 +1529,11 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) t = 0; dev_queue = netdev_get_tx_queue(dev, 0); - if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) + if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0) goto done; dev_queue = &dev->rx_queue; - if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) + if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0) goto done; done: diff --git a/trunk/net/sched/sch_cbq.c b/trunk/net/sched/sch_cbq.c index 47ef492c4ff4..4e261ce62f48 100644 --- a/trunk/net/sched/sch_cbq.c +++ b/trunk/net/sched/sch_cbq.c @@ -654,7 +654,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) } sch->flags &= ~TCQ_F_THROTTLED; - __netif_schedule(qdisc_root(sch)); + __netif_schedule(sch); return HRTIMER_NORESTART; } diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c index c3ed4d44fc14..7cf83b37459d 100644 --- a/trunk/net/sched/sch_generic.c +++ b/trunk/net/sched/sch_generic.c @@ -518,17 +518,14 @@ void qdisc_reset(struct Qdisc *qdisc) } EXPORT_SYMBOL(qdisc_reset); -void qdisc_destroy(struct Qdisc *qdisc) +/* this is the rcu callback function to clean up a qdisc when there + * are no further references to it */ + +static void __qdisc_destroy(struct rcu_head *head) { + struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); const struct Qdisc_ops *ops = qdisc->ops; - if (qdisc->flags & TCQ_F_BUILTIN || - !atomic_dec_and_test(&qdisc->refcnt)) - return; - - if (qdisc->parent) - list_del(&qdisc->list); - #ifdef CONFIG_NET_SCHED qdisc_put_stab(qdisc->stab); #endif @@ -545,6 +542,20 @@ void qdisc_destroy(struct Qdisc *qdisc) kfree((char *) qdisc - qdisc->padded); } + +/* Under qdisc_lock(qdisc) and BH! 
*/ + +void qdisc_destroy(struct Qdisc *qdisc) +{ + if (qdisc->flags & TCQ_F_BUILTIN || + !atomic_dec_and_test(&qdisc->refcnt)) + return; + + if (qdisc->parent) + list_del(&qdisc->list); + + call_rcu(&qdisc->q_rcu, __qdisc_destroy); +} EXPORT_SYMBOL(qdisc_destroy); static bool dev_all_qdisc_sleeping_noop(struct net_device *dev) @@ -586,9 +597,6 @@ static void transition_one_qdisc(struct net_device *dev, struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; int *need_watchdog_p = _need_watchdog; - if (!(new_qdisc->flags & TCQ_F_BUILTIN)) - clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); - rcu_assign_pointer(dev_queue->qdisc, new_qdisc); if (need_watchdog_p && new_qdisc != &noqueue_qdisc) *need_watchdog_p = 1; @@ -632,9 +640,6 @@ static void dev_deactivate_queue(struct net_device *dev, if (qdisc) { spin_lock_bh(qdisc_lock(qdisc)); - if (!(qdisc->flags & TCQ_F_BUILTIN)) - set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); - dev_queue->qdisc = qdisc_default; qdisc_reset(qdisc); @@ -642,7 +647,7 @@ static void dev_deactivate_queue(struct net_device *dev, } } -static bool some_qdisc_is_busy(struct net_device *dev) +static bool some_qdisc_is_running(struct net_device *dev, int lock) { unsigned int i; @@ -653,15 +658,16 @@ static bool some_qdisc_is_busy(struct net_device *dev) int val; dev_queue = netdev_get_tx_queue(dev, i); - q = dev_queue->qdisc_sleeping; + q = dev_queue->qdisc; root_lock = qdisc_lock(q); - spin_lock_bh(root_lock); + if (lock) + spin_lock_bh(root_lock); - val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || - test_bit(__QDISC_STATE_SCHED, &q->state)); + val = test_bit(__QDISC_STATE_RUNNING, &q->state); - spin_unlock_bh(root_lock); + if (lock) + spin_unlock_bh(root_lock); if (val) return true; @@ -671,6 +677,8 @@ static bool some_qdisc_is_busy(struct net_device *dev) void dev_deactivate(struct net_device *dev) { + bool running; + netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc); @@ -680,8 +688,25 @@ void dev_deactivate(struct net_device *dev) synchronize_rcu(); /* Wait for outstanding qdisc_run calls. */ - while (some_qdisc_is_busy(dev)) - yield(); + do { + while (some_qdisc_is_running(dev, 0)) + yield(); + + /* + * Double-check inside queue lock to ensure that all effects + * of the queue run are visible when we return. + */ + running = some_qdisc_is_running(dev, 1); + + /* + * The running flag should never be set at this point because + * we've already set dev->qdisc to noop_qdisc *inside* the same + * pair of spin locks. That is, if any qdisc_run starts after + * our initial test it should see the noop_qdisc and then + * clear the RUNNING bit before dropping the queue lock. So + * if it is set here then we've found a bug. 
+ */ + } while (WARN_ON_ONCE(running)); } static void dev_init_scheduler_queue(struct net_device *dev, @@ -710,10 +735,14 @@ static void shutdown_scheduler_queue(struct net_device *dev, struct Qdisc *qdisc_default = _qdisc_default; if (qdisc) { + spinlock_t *root_lock = qdisc_lock(qdisc); + dev_queue->qdisc = qdisc_default; dev_queue->qdisc_sleeping = qdisc_default; + spin_lock_bh(root_lock); qdisc_destroy(qdisc); + spin_unlock_bh(root_lock); } } diff --git a/trunk/net/sched/sch_htb.c b/trunk/net/sched/sch_htb.c index 0df0df202ed0..be35422711a3 100644 --- a/trunk/net/sched/sch_htb.c +++ b/trunk/net/sched/sch_htb.c @@ -577,7 +577,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) sch->qstats.drops++; cl->qstats.drops++; } - return ret; + return NET_XMIT_DROP; } else { cl->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; @@ -623,7 +623,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch) sch->qstats.drops++; cl->qstats.drops++; } - return ret; + return NET_XMIT_DROP; } else htb_activate(q, cl); @@ -1279,8 +1279,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) /* delete from hash and active; remainder in destroy_class */ qdisc_class_hash_remove(&q->clhash, &cl->common); - if (cl->parent) - cl->parent->children--; + cl->parent->children--; if (cl->prio_activity) htb_deactivate(q, cl); diff --git a/trunk/net/sched/sch_prio.c b/trunk/net/sched/sch_prio.c index a6697c686c7f..eac197610edf 100644 --- a/trunk/net/sched/sch_prio.c +++ b/trunk/net/sched/sch_prio.c @@ -113,11 +113,11 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch) if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) { sch->q.qlen++; sch->qstats.requeues++; - return NET_XMIT_SUCCESS; + return 0; } if (net_xmit_drop_count(ret)) sch->qstats.drops++; - return ret; + return NET_XMIT_DROP; } diff --git a/trunk/net/sched/sch_tbf.c b/trunk/net/sched/sch_tbf.c index 94c61598b86a..7d3b7ff3bf07 100644 --- a/trunk/net/sched/sch_tbf.c +++ b/trunk/net/sched/sch_tbf.c @@ -123,8 +123,15 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) struct tbf_sched_data *q = qdisc_priv(sch); int ret; - if (qdisc_pkt_len(skb) > q->max_size) - return qdisc_reshape_fail(skb, sch); + if (qdisc_pkt_len(skb) > q->max_size) { + sch->qstats.drops++; +#ifdef CONFIG_NET_CLS_ACT + if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) +#endif + kfree_skb(skb); + + return NET_XMIT_DROP; + } ret = qdisc_enqueue(skb, q->qdisc); if (ret != 0) { diff --git a/trunk/net/sctp/ipv6.c b/trunk/net/sctp/ipv6.c index 47f91afa0211..483a01d0740a 100644 --- a/trunk/net/sctp/ipv6.c +++ b/trunk/net/sctp/ipv6.c @@ -319,8 +319,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, __func__, asoc, dst, NIP6(daddr->v6.sin6_addr)); if (!asoc) { - ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)), - dst ? ip6_dst_idev(dst)->dev : NULL, + ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, &daddr->v6.sin6_addr, inet6_sk(&sk->inet.sk)->srcprefs, &saddr->v6.sin6_addr); diff --git a/trunk/net/tipc/subscr.c b/trunk/net/tipc/subscr.c index 0747d8a9232f..0326d3060bc7 100644 --- a/trunk/net/tipc/subscr.c +++ b/trunk/net/tipc/subscr.c @@ -85,7 +85,7 @@ static struct top_srv topsrv = { 0 }; static u32 htohl(u32 in, int swap) { - return swap ? swab32(in) : in; + return swap ? 
(u32)___constant_swab32(in) : in; } /** diff --git a/trunk/net/wireless/wext.c b/trunk/net/wireless/wext.c index d98ffb75119a..df5b3886c36b 100644 --- a/trunk/net/wireless/wext.c +++ b/trunk/net/wireless/wext.c @@ -1277,7 +1277,6 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev, r->ifi_flags = dev_get_flags(dev); r->ifi_change = 0; /* Wireless changes don't affect those flags */ - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); /* Add the wireless events in the netlink packet */ NLA_PUT(skb, IFLA_WIRELESS, event_len, event); diff --git a/trunk/net/xfrm/xfrm_output.c b/trunk/net/xfrm/xfrm_output.c index ac25b4c0e982..3f964db908a7 100644 --- a/trunk/net/xfrm/xfrm_output.c +++ b/trunk/net/xfrm/xfrm_output.c @@ -112,13 +112,16 @@ static int xfrm_output_one(struct sk_buff *skb, int err) int xfrm_output_resume(struct sk_buff *skb, int err) { while (likely((err = xfrm_output_one(skb, err)) == 0)) { + struct xfrm_state *x; + nf_reset(skb); err = skb->dst->ops->local_out(skb); if (unlikely(err != 1)) goto out; - if (!skb->dst->xfrm) + x = skb->dst->xfrm; + if (!x) return dst_output(skb); err = nf_hook(skb->dst->ops->family,